// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
// Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// A malloc that uses a per-thread cache to satisfy small malloc requests.
// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
//
// See doc/tcmalloc.html for a high-level
// description of how this malloc works.
// SYNCHRONIZATION
//  1. The thread-specific lists are accessed without acquiring any locks.
//     This is safe because each such list is only accessed by one thread.
//  2. We have a lock per central free-list, and hold it while manipulating
//     the central free list for a particular size.
//  3. The central page allocator is protected by "pageheap_lock".
//  4. The pagemap (which maps from page-number to descriptor),
//     can be read without holding any locks, and written while holding
//     the "pageheap_lock".
//  5. To improve performance, a subset of the information one can get
//     from the pagemap is cached in a data structure, pagemap_cache_,
//     that atomically reads and writes its entries. This cache can be
//     read and written without locking.
//
//     This multi-threaded access to the pagemap is safe for fairly
//     subtle reasons. We basically assume that when an object X is
//     allocated by thread A and deallocated by thread B, there must
//     have been appropriate synchronization in the handoff of object
//     X from thread A to thread B. The same logic applies to pagemap_cache_.
// THE PAGEID-TO-SIZECLASS CACHE
// Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
// returns 0 for a particular PageID then that means "no information," not that
// the sizeclass is 0. The cache may have stale information for pages that do
// not hold the beginning of any free()'able object. Staleness is eliminated
// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
// do_memalign() for all other relevant pages.
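//
// Illustrative example (editor's note, not part of the original comment):
// if page P holds live 48-byte objects, pagemap_cache_ may map P to
// SizeClass(48); a later lookup that returns 0 for P only means "consult
// the full pagemap", never "size class 0".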
// TODO: Bias reclamation to larger addresses
// TODO: implement mallinfo/mallopt
// TODO: Better testing
//
// 9/28/2003 (new page-level allocator replaces ptmalloc2):
// * malloc/free of small objects goes from ~300 ns to ~50 ns.
// * allocation of a reasonably complicated struct
//   goes from about 1100 ns to about 300 ns.
78 #include "FastMalloc.h"
80 #include "Assertions.h"
81 #if ENABLE(JSC_MULTIPLE_THREADS)
85 #ifndef NO_TCMALLOC_SAMPLES
87 #define NO_TCMALLOC_SAMPLES
91 #if !defined(USE_SYSTEM_MALLOC) && defined(NDEBUG)
92 #define FORCE_SYSTEM_MALLOC 0
94 #define FORCE_SYSTEM_MALLOC 1
97 #define TCMALLOC_TRACK_DECOMMITED_SPANS (HAVE(VIRTUALALLOC))
#if ENABLE(JSC_MULTIPLE_THREADS)
static pthread_key_t isForbiddenKey;
static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
static void initializeIsForbiddenKey()
{
    pthread_key_create(&isForbiddenKey, 0);
}

static bool isForbidden()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    return !!pthread_getspecific(isForbiddenKey);
}

void fastMallocForbid()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    pthread_setspecific(isForbiddenKey, &isForbiddenKey);
}

void fastMallocAllow()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    pthread_setspecific(isForbiddenKey, 0);
}

#else

static bool staticIsForbidden;
static bool isForbidden()
{
    return staticIsForbidden;
}

void fastMallocForbid()
{
    staticIsForbidden = true;
}

void fastMallocAllow()
{
    staticIsForbidden = false;
}
#endif // ENABLE(JSC_MULTIPLE_THREADS)

} // namespace WTF
#endif // NDEBUG

#include <string.h>

namespace WTF {
void* fastZeroedMalloc(size_t n)
{
    void* result = fastMalloc(n);
    memset(result, 0, n);
    return result;
}

void* tryFastZeroedMalloc(size_t n)
{
    void* result = tryFastMalloc(n);
    if (!result)
        return 0;
    memset(result, 0, n);
    return result;
}

} // namespace WTF
#if FORCE_SYSTEM_MALLOC

#include <stdlib.h>
#if !PLATFORM(WIN_OS)
    #include <pthread.h>
#endif

namespace WTF {

void* tryFastMalloc(size_t n)
{
    ASSERT(!isForbidden());
    return malloc(n);
}

void* fastMalloc(size_t n)
{
    ASSERT(!isForbidden());
    void* result = malloc(n);
    if (!result)
        CRASH();
    return result;
}

void* tryFastCalloc(size_t n_elements, size_t element_size)
{
    ASSERT(!isForbidden());
    return calloc(n_elements, element_size);
}

void* fastCalloc(size_t n_elements, size_t element_size)
{
    ASSERT(!isForbidden());
    void* result = calloc(n_elements, element_size);
    if (!result)
        CRASH();
    return result;
}

void fastFree(void* p)
{
    ASSERT(!isForbidden());
    free(p);
}

void* tryFastRealloc(void* p, size_t n)
{
    ASSERT(!isForbidden());
    return realloc(p, n);
}

void* fastRealloc(void* p, size_t n)
{
    ASSERT(!isForbidden());
    void* result = realloc(p, n);
    if (!result)
        CRASH();
    return result;
}

void releaseFastMallocFreeMemory() { }
FastMallocStatistics fastMallocStatistics()
{
    FastMallocStatistics statistics = { 0, 0, 0, 0 };
    return statistics;
}

} // namespace WTF

#if PLATFORM(DARWIN)
// This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
// It will never be used in this case, so its type and value are less interesting than its presence.
extern "C" const int jscore_fastmalloc_introspection = 0;
#endif
#else // FORCE_SYSTEM_MALLOC

#if HAVE(STDINT_H)
#include <stdint.h>
#elif HAVE(INTTYPES_H)
#include <inttypes.h>
#else
#include <sys/types.h>
#endif

#include "AlwaysInline.h"
#include "Assertions.h"
#include "TCPackedCache.h"
#include "TCPageMap.h"
#include "TCSpinLock.h"
#include "TCSystemAlloc.h"
#include <algorithm>
#include <errno.h>
#include <new>
#include <pthread.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#if COMPILER(MSVC)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#endif

#if WTF_CHANGES

#if PLATFORM(DARWIN)
#include "MallocZoneSupport.h"
#include <wtf/HashSet.h>
#endif

#ifndef PRIuS
#define PRIuS "zu"
#endif

// Calling pthread_getspecific through a global function pointer is faster than a normal
// call to the function on Mac OS X, and it's used in performance-critical code. So we
// use a function pointer. But that's not necessarily faster on other platforms, and we had
// problems with this technique on Windows, so we'll do this only on Mac OS X.
#if PLATFORM(DARWIN)
static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
#define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
#endif

#endif
#define DEFINE_VARIABLE(type, name, value, meaning) \
  namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead {  \
  type FLAGS_##name(value);                                \
  char FLAGS_no##name;                                     \
  }                                                        \
  using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name

#define DEFINE_int64(name, value, meaning) \
  DEFINE_VARIABLE(int64_t, name, value, meaning)

#define DEFINE_double(name, value, meaning) \
  DEFINE_VARIABLE(double, name, value, meaning)

namespace WTF {
#define malloc fastMalloc
#define calloc fastCalloc
#define free fastFree
#define realloc fastRealloc

#define MESSAGE LOG_ERROR
#define CHECK_CONDITION ASSERT

#if PLATFORM(DARWIN)
class TCMalloc_PageHeap;
class TCMalloc_ThreadCache;
class TCMalloc_Central_FreeListPadded;

class FastMallocZone {
public:
    static void init();

    static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
    static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
    static boolean_t check(malloc_zone_t*) { return true; }
    static void print(malloc_zone_t*, boolean_t) { }
    static void log(malloc_zone_t*, void*) { }
    static void forceLock(malloc_zone_t*) { }
    static void forceUnlock(malloc_zone_t*) { }
    static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }

private:
    FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*);
    static size_t size(malloc_zone_t*, const void*);
    static void* zoneMalloc(malloc_zone_t*, size_t);
    static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
    static void zoneFree(malloc_zone_t*, void*);
    static void* zoneRealloc(malloc_zone_t*, void*, size_t);
    static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
    static void zoneDestroy(malloc_zone_t*) { }

    malloc_zone_t m_zone;
    TCMalloc_PageHeap* m_pageHeap;
    TCMalloc_ThreadCache** m_threadHeaps;
    TCMalloc_Central_FreeListPadded* m_centralCaches;
};

#endif

#ifndef WTF_CHANGES
// This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip) (0)
#else
# include <google/stacktrace.h>
#endif
#endif
// Even if we have support for thread-local storage in the compiler
// and linker, the OS may not support it. We need to check that at
// runtime. Right now, we have to keep a manual set of "bad" OSes.
#if defined(HAVE_TLS)
  static bool kernel_supports_tls = false;      // be conservative
  static inline bool KernelSupportsTLS() {
    return kernel_supports_tls;
  }
# if !HAVE_DECL_UNAME   // if too old for uname, probably too old for TLS
    static void CheckIfKernelSupportsTLS() {
      kernel_supports_tls = false;
    }
# else
#   include <sys/utsname.h>   // DECL_UNAME checked for <sys/utsname.h> too
    static void CheckIfKernelSupportsTLS() {
      struct utsname buf;
      if (uname(&buf) != 0) {   // should be impossible
        MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
        kernel_supports_tls = false;
      } else if (strcasecmp(buf.sysname, "linux") == 0) {
        // The linux case: the first kernel to support TLS was 2.6.0
        if (buf.release[0] < '2' && buf.release[1] == '.')    // 0.x or 1.x
          kernel_supports_tls = false;
        else if (buf.release[0] == '2' && buf.release[1] == '.' &&
                 buf.release[2] >= '0' && buf.release[2] < '6' &&
                 buf.release[3] == '.')                       // 2.0 - 2.5
          kernel_supports_tls = false;
        else
          kernel_supports_tls = true;
      } else {        // some other kernel, we'll be optimistic
        kernel_supports_tls = true;
      }
      // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
    }
# endif  // HAVE_DECL_UNAME
#endif   // HAVE_TLS
// __THROW is defined in glibc systems. It means, counter-intuitively,
// "This function will never throw an exception." It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.
#ifndef __THROW    // I guess we're not on a glibc system
# define __THROW   // __THROW is just an optimization, so ok to make it ""
#endif
//-------------------------------------------------------------------
// Configuration
//-------------------------------------------------------------------

// Not all possible combinations of the following parameters make
// sense. In particular, if kMaxSize increases, you may have to
// increase kNumClasses as well.
static const size_t kPageShift  = 12;
static const size_t kPageSize   = 1 << kPageShift;
static const size_t kMaxSize    = 8u * kPageSize;
static const size_t kAlignShift = 3;
static const size_t kAlignment  = 1 << kAlignShift;
static const size_t kNumClasses = 68;

// Allocates a big block of memory for the pagemap once we reach more than
// 128MB
static const size_t kPageMapBigAllocationThreshold = 128 << 20;
// Minimum number of pages to fetch from system at a time. Must be
// significantly bigger than kBlockSize to amortize system-call
// overhead, and also to reduce external fragmentation. Also, we
// should keep this value big because various incarnations of Linux
// have small limits on the number of mmap() regions per
// address-space.
static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
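// Worked example (illustrative, assuming the constants above): with
// kPageShift == 12 (4KB pages), kMinSystemAlloc == 1 << 8 == 256 pages,
// i.e. each system allocation fetches at least 1MB.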
// Number of objects to move between a per-thread list and a central
// list in one shot. We want this to be not too small so we can
// amortize the lock overhead for accessing the central list. Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
static int num_objects_to_move[kNumClasses];

// Maximum length we allow a per-thread free-list to have before we
// move objects from it into the corresponding central free-list. We
// want this big to avoid locking the central free-list too often. It
// should not hurt to make this list somewhat big because the
// scavenging code will shrink it down when its contents are not in use.
static const int kMaxFreeListLength = 256;

// Lower and upper bounds on the per-thread cache sizes
static const size_t kMinThreadCacheSize = kMaxSize * 2;
static const size_t kMaxThreadCacheSize = 512 * 1024;

// Default bound on the total amount of thread caches
static const size_t kDefaultOverallThreadCacheSize = 16 << 20;

// For all span-lengths < kMaxPages we keep an exact-size list.
// REQUIRED: kMaxPages >= kMinSystemAlloc;
static const size_t kMaxPages = kMinSystemAlloc;
/* The smallest prime > 2^n */
static int primes_list[] = {
    // Small values might cause high rates of sampling
    // and hence commented out.
    // 2, 5, 11, 17, 37, 67, 131, 257,
    // 521, 1031, 2053, 4099, 8209, 16411,
    32771, 65537, 131101, 262147, 524309, 1048583,
    2097169, 4194319, 8388617, 16777259, 33554467 };
// Twice the approximate gap between sampling actions.
// I.e., we take one sample approximately once every
//      tcmalloc_sample_parameter/2
// bytes of allocation, i.e., ~ once every 128KB.
// Must be a prime number.
#ifdef NO_TCMALLOC_SAMPLES
DEFINE_int64(tcmalloc_sample_parameter, 0,
             "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
static size_t sample_period = 0;
#else
DEFINE_int64(tcmalloc_sample_parameter, 262147,
             "Twice the approximate gap between sampling actions."
             " Must be a prime number. Otherwise will be rounded up to a "
             " larger prime number");
static size_t sample_period = 262147;
#endif

// Protects sample_period above
static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
// Parameters for controlling how fast memory is returned to the OS.

DEFINE_double(tcmalloc_release_rate, 1,
              "Rate at which we release unused memory to the system. "
              "Zero means we never release memory back to the system. "
              "Increase this flag to return memory faster; decrease it "
              "to return memory slower. Reasonable rates are in the "
              "range [0,10]");
//-------------------------------------------------------------------
// Mapping from size to size_class and vice versa
//-------------------------------------------------------------------

// Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
// array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
// So for these larger sizes we have an array indexed by ceil(size/128).
//
// We flatten both logical arrays into one physical array and use
// arithmetic to compute an appropriate index. The constants used by
// ClassIndex() were selected to make the flattening work.
//
// Examples:
//   Size       Expression                      Index
//   -------------------------------------------------------
//   0          (0 + 7) / 8                     0
//   1          (1 + 7) / 8                     1
//   ...
//   1024       (1024 + 7) / 8                  128
//   1025       (1025 + 127 + (120<<7)) / 128   129
//   ...
//   32768      (32768 + 127 + (120<<7)) / 128  376
static const size_t kMaxSmallSize = 1024;
static const int shift_amount[2] = { 3, 7 };  // For divides by 8 or 128
static const int add_amount[2] = { 7, 127 + (120 << 7) };
static unsigned char class_array[377];
// Compute index of the class_array[] entry for a given size
static inline int ClassIndex(size_t s) {
  const int i = (s > kMaxSmallSize);
  return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
}
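// Worked examples (illustrative, derived from the constants above): for
// s = 24, i == 0 and the index is (24 + 7) >> 3 == 3; for s = 2048, i == 1
// and the index is (2048 + 15487) >> 7 == 136. Both fall inside
// class_array[]'s 377 entries.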
// Mapping from size class to max size storable in that class
static size_t class_to_size[kNumClasses];

// Mapping from size class to number of pages to allocate at a time
static size_t class_to_pages[kNumClasses];

// TransferCache is used to cache transfers of num_objects_to_move[size_class]
// back and forth between thread caches and the central cache for a given size
// class.
struct TCEntry {
  void *head;  // Head of chain of objects.
  void *tail;  // Tail of chain of objects.
};
// A central cache freelist can have anywhere from 0 to kNumTransferEntries
// slots to put linked list chains into. To keep memory usage bounded the total
// number of TCEntries across size classes is fixed. Currently each size
// class is initially given one TCEntry which also means that the maximum any
// one class can have is kNumClasses.
static const int kNumTransferEntries = kNumClasses;
// Note: the following only works for "n"s that fit in 32-bits, but
// that is fine since we only use it for small sizes.
static inline int LgFloor(size_t n) {
  int log = 0;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    size_t x = n >> shift;
    if (x != 0) {
      n = x;
      log += shift;
    }
  }
  ASSERT(n == 1);
  return log;
}
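// Worked example (illustrative): for LgFloor(25), only the shift of 4 fires
// (25 >> 4 == 1), so log becomes 4 and n becomes 1; the remaining shifts
// find nothing, and 4 == floor(log2(25)).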
// Some very basic linked list functions for dealing with using void * as
// storage.

static inline void *SLL_Next(void *t) {
  return *(reinterpret_cast<void**>(t));
}

static inline void SLL_SetNext(void *t, void *n) {
  *(reinterpret_cast<void**>(t)) = n;
}

static inline void SLL_Push(void **list, void *element) {
  SLL_SetNext(element, *list);
  *list = element;
}

static inline void *SLL_Pop(void **list) {
  void *result = *list;
  *list = SLL_Next(*list);
  return result;
}
// Remove N elements from a linked list to which head points. head will be
// modified to point to the new head. start and end will point to the first
// and last nodes of the range. Note that end will point to NULL after this
// function is called.
static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
  if (N == 0) {
    *start = NULL;
    *end = NULL;
    return;
  }

  void *tmp = *head;
  for (int i = 1; i < N; ++i) {
    tmp = SLL_Next(tmp);
  }

  *start = *head;
  *end = tmp;
  *head = SLL_Next(tmp);
  // Unlink range from list.
  SLL_SetNext(tmp, NULL);
}
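// Illustrative trace (editor's note): popping N == 2 from the list
// a -> b -> c leaves *start == a and *end == b (b's next pointer now NULL),
// while *head advances to c.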
static inline void SLL_PushRange(void **head, void *start, void *end) {
  if (!start) return;
  SLL_SetNext(end, *head);
  *head = start;
}

static inline size_t SLL_Size(void *head) {
  int count = 0;
  while (head) {
    count++;
    head = SLL_Next(head);
  }
  return count;
}
// Setup helper functions.

static ALWAYS_INLINE size_t SizeClass(size_t size) {
  return class_array[ClassIndex(size)];
}

// Get the byte-size for a specified class
static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
  return class_to_size[cl];
}
static int NumMoveSize(size_t size) {
  if (size == 0) return 0;
  // Use approx 64k transfers between thread and central caches.
  int num = static_cast<int>(64.0 * 1024.0 / size);
  if (num < 2) num = 2;
  // Clamp well below kMaxFreeListLength to avoid ping pong between central
  // and thread caches.
  if (num > static_cast<int>(0.8 * kMaxFreeListLength))
    num = static_cast<int>(0.8 * kMaxFreeListLength);

  // Also, avoid bringing in too many objects into small object free
  // lists. There are lots of such lists, and if we allow each one to
  // fetch too many at a time, we end up having to scavenge too often
  // (especially when there are lots of threads and each thread gets a
  // small allowance for its thread cache).
  //
  // TODO: Make thread cache free list sizes dynamic so that we do not
  // have to equally divide a fixed resource amongst lots of threads.
  if (num > 32) num = 32;

  return num;
}
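// Worked example (illustrative): NumMoveSize(256) starts at
// 65536 / 256 == 256, is clamped to 0.8 * 256 == 204 by the ping-pong
// bound, and finally to 32 by the small-object cap above.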
// Initialize the mapping arrays
static void InitSizeClasses() {
  // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
  if (ClassIndex(0) < 0) {
    MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
    abort();
  }
  if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
    MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
    abort();
  }
  // Compute the size classes we want to use
  size_t sc = 1;   // Next size class to assign
  unsigned char alignshift = kAlignShift;
  int last_lg = -1;
  for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
    int lg = LgFloor(size);
    if (lg > last_lg) {
      // Increase alignment every so often.
      //
      // Since we double the alignment every time size doubles and
      // size >= 128, this means that space wasted due to alignment is
      // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
      // bytes, so the space wasted as a percentage starts falling for
      // sizes > 2K.
      if ((lg >= 7) && (alignshift < 8)) {
        alignshift++;
      }
      last_lg = lg;
    }

    // Allocate enough pages so leftover is less than 1/8 of total.
    // This bounds wasted space to at most 12.5%.
    size_t psize = kPageSize;
    while ((psize % size) > (psize >> 3)) {
      psize += kPageSize;
    }
    const size_t my_pages = psize >> kPageShift;

    if (sc > 1 && my_pages == class_to_pages[sc-1]) {
      // See if we can merge this into the previous class without
      // increasing the fragmentation of the previous class.
      const size_t my_objects = (my_pages << kPageShift) / size;
      const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
                                  / class_to_size[sc-1];
      if (my_objects == prev_objects) {
        // Adjust last class to include this size
        class_to_size[sc-1] = size;
        continue;
      }
    }

    // Add new class
    class_to_pages[sc] = my_pages;
    class_to_size[sc] = size;
    sc++;
  }
  if (sc != kNumClasses) {
    MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
            sc, int(kNumClasses));
    abort();
  }
  // Initialize the mapping arrays
  int next_size = 0;
  for (unsigned char c = 1; c < kNumClasses; c++) {
    const size_t max_size_in_class = class_to_size[c];
    for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
      class_array[ClassIndex(s)] = c;
    }
    next_size = static_cast<int>(max_size_in_class + kAlignment);
  }
  // Double-check sizes just to be safe
  for (size_t size = 0; size <= kMaxSize; size++) {
    const size_t sc = SizeClass(size);
    if (sc == 0) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      abort();
    }
    if (sc > 1 && size <= class_to_size[sc-1]) {
      MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
              "\n", sc, size);
      abort();
    }
    if (sc >= kNumClasses) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      abort();
    }
    const size_t s = class_to_size[sc];
    if (size > s) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      abort();
    }
    if (s == 0) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      abort();
    }
  }
  // Initialize the num_objects_to_move array.
  for (size_t cl = 1; cl < kNumClasses; ++cl) {
    num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
  }
#ifndef WTF_CHANGES
  if (false) {
    // Dump class sizes and maximum external wastage per size class
    for (size_t cl = 1; cl < kNumClasses; ++cl) {
      const int alloc_size = class_to_pages[cl] << kPageShift;
      const int alloc_objs = alloc_size / class_to_size[cl];
      const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
      const int max_waste = alloc_size - min_used;
      MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
              int(cl),
              int(class_to_size[cl-1] + 1),
              int(class_to_size[cl]),
              int(class_to_pages[cl] << kPageShift),
              max_waste * 100.0 / alloc_size
              );
    }
  }
#endif
}
// -------------------------------------------------------------------------
// Simple allocator for objects of a specified type. External locking
// is required before accessing one of these objects.
// -------------------------------------------------------------------------

// Metadata allocator -- keeps stats about how many bytes allocated
static uint64_t metadata_system_bytes = 0;
static void* MetaDataAlloc(size_t bytes) {
  void* result = TCMalloc_SystemAlloc(bytes, 0);
  if (result != NULL) {
    metadata_system_bytes += bytes;
  }
  return result;
}
template <class T>
class PageHeapAllocator {
 private:
  // How much to allocate from system at a time
  static const size_t kAllocIncrement = 32 << 10;

  // Aligned size of T
  static const size_t kAlignedSize
  = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
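  // Illustrative arithmetic (editor's note): with kAlignment == 8, a T of
  // size 13 yields ((13 + 8 - 1) / 8) * 8 == 16, so every carved object is
  // rounded up to the next multiple of kAlignment.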
  // Free area from which to carve new objects
  char* free_area_;
  size_t free_avail_;

  // Free list of already carved objects
  void* free_list_;

  // Number of allocated but unfreed objects
  int inuse_;

 public:
  void Init() {
    ASSERT(kAlignedSize <= kAllocIncrement);
    inuse_ = 0;
    free_area_ = NULL;
    free_avail_ = 0;
    free_list_ = NULL;
  }

  T* New() {
    // Consult free list
    void* result;
    if (free_list_ != NULL) {
      result = free_list_;
      free_list_ = *(reinterpret_cast<void**>(result));
    } else {
      if (free_avail_ < kAlignedSize) {
        // Need more room
        free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
        if (free_area_ == NULL) CRASH();
        free_avail_ = kAllocIncrement;
      }
      result = free_area_;
      free_area_ += kAlignedSize;
      free_avail_ -= kAlignedSize;
    }
    inuse_++;
    return reinterpret_cast<T*>(result);
  }

  void Delete(T* p) {
    *(reinterpret_cast<void**>(p)) = free_list_;
    free_list_ = p;
    inuse_--;
  }

  int inuse() const { return inuse_; }
};
// -------------------------------------------------------------------------
// Span - a contiguous run of pages
// -------------------------------------------------------------------------

// Type that can hold a page number
typedef uintptr_t PageID;

// Type that can hold the length of a run of pages
typedef uintptr_t Length;

static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
// Convert byte size into pages. This won't overflow, but may return
// an unreasonably large value if bytes is huge enough.
static inline Length pages(size_t bytes) {
  return (bytes >> kPageShift) +
      ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}
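// Illustrative values (assuming kPageShift == 12): pages(1) == 1,
// pages(4096) == 1, and pages(4097) == 2.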
// Convert a user size into the number of bytes that will actually be
// allocated
static size_t AllocationSize(size_t bytes) {
  if (bytes > kMaxSize) {
    // Large object: we allocate an integral number of pages
    ASSERT(bytes <= (kMaxValidPages << kPageShift));
    return pages(bytes) << kPageShift;
  } else {
    // Small object: find the size class to which it belongs
    return ByteSizeForClass(SizeClass(bytes));
  }
}
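// Worked example (illustrative, assuming 4KB pages): 70000 bytes exceeds
// kMaxSize (32768), so it rounds to pages(70000) == 18 pages, i.e. 73728
// bytes; a small request instead rounds up to its size class's byte size.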
// Information kept for a span (a contiguous run of pages).
struct Span {
  PageID        start;          // Starting page number
  Length        length;         // Number of pages in span
  Span*         next;           // Used when in linked list
  Span*         prev;           // Used when in linked list
  void*         objects;        // Linked list of free objects
  unsigned int  free : 1;       // Is the span free
#ifndef NO_TCMALLOC_SAMPLES
  unsigned int  sample : 1;     // Sampled object?
#endif
  unsigned int  sizeclass : 8;  // Size-class for small objects (or 0)
  unsigned int  refcount : 11;  // Number of non-free objects
  bool decommitted : 1;

#undef SPAN_HISTORY
#ifdef SPAN_HISTORY
  // For debugging, we can keep a log of events per span
  int nexthistory;
  char history[64];
  int value[64];
#endif
};
#if TCMALLOC_TRACK_DECOMMITED_SPANS
#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)
#else
#define ASSERT_SPAN_COMMITTED(span)
#endif
#ifdef SPAN_HISTORY
void Event(Span* span, char op, int v = 0) {
  span->history[span->nexthistory] = op;
  span->value[span->nexthistory] = v;
  span->nexthistory++;
  if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
}
#else
#define Event(s,o,v) ((void) 0)
#endif
// Allocator/deallocator for spans
static PageHeapAllocator<Span> span_allocator;
static Span* NewSpan(PageID p, Length len) {
  Span* result = span_allocator.New();
  memset(result, 0, sizeof(*result));
  result->start = p;
  result->length = len;
#ifdef SPAN_HISTORY
  result->nexthistory = 0;
#endif
  return result;
}

static inline void DeleteSpan(Span* span) {
#ifndef NDEBUG
  // In debug mode, trash the contents of deleted Spans
  memset(span, 0x3f, sizeof(*span));
#endif
  span_allocator.Delete(span);
}
// -------------------------------------------------------------------------
// Doubly linked list of spans.
// -------------------------------------------------------------------------

static inline void DLL_Init(Span* list) {
  list->next = list;
  list->prev = list;
}

static inline void DLL_Remove(Span* span) {
  span->prev->next = span->next;
  span->next->prev = span->prev;
  span->prev = NULL;
  span->next = NULL;
}
static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
  return list->next == list;
}

static int DLL_Length(const Span* list) {
  int result = 0;
  for (Span* s = list->next; s != list; s = s->next) {
    result++;
  }
  return result;
}
#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
static void DLL_Print(const char* label, const Span* list) {
  MESSAGE("%-10s %p:", label, list);
  for (const Span* s = list->next; s != list; s = s->next) {
    MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
  }
  MESSAGE("\n");
}
#endif
static inline void DLL_Prepend(Span* list, Span* span) {
  ASSERT(span->next == NULL);
  ASSERT(span->prev == NULL);
  span->next = list->next;
  span->prev = list;
  list->next->prev = span;
  list->next = span;
}
// -------------------------------------------------------------------------
// Stack traces kept for sampled allocations
//   The following state is protected by pageheap_lock_.
// -------------------------------------------------------------------------

// size/depth are made the same size as a pointer so that some generic
// code below can conveniently cast them back and forth to void*.
static const int kMaxStackDepth = 31;
struct StackTrace {
  uintptr_t size;          // Size of object
  uintptr_t depth;         // Number of PC values stored in array below
  void*     stack[kMaxStackDepth];
};
static PageHeapAllocator<StackTrace> stacktrace_allocator;
static Span sampled_objects;
// -------------------------------------------------------------------------
// Map from page-id to per-page data
// -------------------------------------------------------------------------

// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
  typedef PackedCache<BITS, uint64_t> CacheType;
};
#if defined(WTF_CHANGES)
#if PLATFORM(X86_64)
// On all known X86-64 platforms, the upper 16 bits are always unused and therefore
// can be excluded from the PageMap key.
// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
static const size_t kBitsUnusedOn64Bit = 16;
#else
static const size_t kBitsUnusedOn64Bit = 0;
#endif

// A three-level map for 64-bit machines
template <> class MapSelector<64> {
 public:
  typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
  typedef PackedCache<64, uint64_t> CacheType;
};
#endif

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32 - kPageShift> Type;
  typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
};
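// Illustrative key widths (editor's note, assuming kPageShift == 12): the
// 32-bit selector uses 32 - 12 == 20-bit page numbers, while the x86-64
// specialization above covers 64 - 12 - 16 == 36 bits of page number.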
// -------------------------------------------------------------------------
// Page-level allocator
//  * Eager coalescing
//
// Heap for page-level allocation. We allow allocating and freeing
// contiguous runs of pages (called a "span").
// -------------------------------------------------------------------------

class TCMalloc_PageHeap {
 public:
  void init();
  // Allocate a run of "n" pages. Returns zero if out of memory.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]".
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: !span->free
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);

  // Return the descriptor for the specified page.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

#ifdef WTF_CHANGES
  inline Span* GetDescriptorEnsureSafe(PageID p)
  {
      pagemap_.Ensure(p, 1);
      return GetDescriptor(p);
  }
  size_t ReturnedBytes() const;
#endif

  // Dump state to stderr
#ifndef WTF_CHANGES
  void Dump(TCMalloc_Printer* out);
#endif

  // Return number of bytes allocated from system
  inline uint64_t SystemBytes() const { return system_bytes_; }

  // Return number of free bytes in heap
  uint64_t FreeBytes() const {
    return (static_cast<uint64_t>(free_pages_) << kPageShift);
  }

  bool Check();
  bool CheckList(Span* list, Length min_pages, Length max_pages);

  // Release all pages on the free list for reuse by the OS:
  void ReleaseFreePages();

  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
 private:
  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
  typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
  PageMap pagemap_;
  mutable PageMapCache pagemap_cache_;
  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span        normal;
    Span        returned;
  };

  // List of free spans of length >= kMaxPages
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans
  SpanList free_[kMaxPages];

  // Number of pages kept in free lists
  uintptr_t free_pages_;

  // Bytes allocated from system
  uint64_t system_bytes_;
  bool GrowHeap(Length n);

  // REQUIRES: span->length >= n
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists. Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.
  //
  // "released" is true iff "span" was found on a "returned" list.
  void Carve(Span* span, Length n, bool released);

  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }
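  // Editor's note (illustrative): only a span's boundary pages are recorded
  // here; Delete() coalesces by probing just those boundaries, so interior
  // pagemap entries may stay stale until RegisterSizeClass() fills them in.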
  // Allocate a large span of length == n. If successful, returns a
  // span of exactly the specified length. Else, returns NULL.
  Span* AllocLarge(Length n);

  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);

  // Number of pages to deallocate before doing more scavenging
  int64_t scavenge_counter_;

  // Index of last free list we scavenged
  size_t scavenge_index_;

#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
  friend class FastMallocZone;
#endif
};
void TCMalloc_PageHeap::init()
{
  pagemap_.init(MetaDataAlloc);
  pagemap_cache_ = PageMapCache(0);
  free_pages_ = 0;
  system_bytes_ = 0;
  scavenge_counter_ = 0;
  // Start scavenging at kMaxPages list
  scavenge_index_ = kMaxPages-1;
  COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
  DLL_Init(&large_.normal);
  DLL_Init(&large_.returned);
  for (size_t i = 0; i < kMaxPages; i++) {
    DLL_Init(&free_[i].normal);
    DLL_Init(&free_[i].returned);
  }
}
inline Span* TCMalloc_PageHeap::New(Length n) {
  ASSERT(Check());
  ASSERT(n > 0);

  // Find first size >= n that has a non-empty list
  for (Length s = n; s < kMaxPages; s++) {
    Span* ll = NULL;
    bool released = false;
    if (!DLL_IsEmpty(&free_[s].normal)) {
      // Found normal span
      ll = &free_[s].normal;
    } else if (!DLL_IsEmpty(&free_[s].returned)) {
      // Found returned span; reallocate it
      ll = &free_[s].returned;
      released = true;
    } else {
      // Keep looking in larger classes
      continue;
    }

    Span* result = ll->next;
    Carve(result, n, released);
#if TCMALLOC_TRACK_DECOMMITED_SPANS
    if (result->decommitted) {
      TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift));
      result->decommitted = false;
    }
#endif
    ASSERT(Check());
    free_pages_ -= n;
    return result;
  }

  Span* result = AllocLarge(n);
  if (result != NULL) {
    ASSERT_SPAN_COMMITTED(result);
    return result;
  }

  // Grow the heap and try again
  if (!GrowHeap(n)) {
    ASSERT(Check());
    return NULL;
  }

  return AllocLarge(n);
}
Span* TCMalloc_PageHeap::AllocLarge(Length n) {
  // Find the best span (closest to n in size).
  // The following loop implements address-ordered best-fit.
  bool from_released = false;
  Span* best = NULL;

  // Search through normal list
  for (Span* span = large_.normal.next;
       span != &large_.normal;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        from_released = false;
      }
    }
  }

  // Search through released list in case it has a better fit
  for (Span* span = large_.returned.next;
       span != &large_.returned;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        from_released = true;
      }
    }
  }

  if (best != NULL) {
    Carve(best, n, from_released);
#if TCMALLOC_TRACK_DECOMMITED_SPANS
    if (best->decommitted) {
      TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift));
      best->decommitted = false;
    }
#endif
    ASSERT(Check());
    free_pages_ -= n;
    return best;
  }
  return NULL;
}
Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
  ASSERT(0 < n);
  ASSERT(n < span->length);
  ASSERT(!span->free);
  ASSERT(span->sizeclass == 0);
  Event(span, 'T', n);

  const Length extra = span->length - n;
  Span* leftover = NewSpan(span->start + n, extra);
  Event(leftover, 'U', extra);
  RecordSpan(leftover);
  pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
  span->length = n;

  return leftover;
}
#if !TCMALLOC_TRACK_DECOMMITED_SPANS
static ALWAYS_INLINE void propagateDecommittedState(Span*, Span*) { }
#else
static ALWAYS_INLINE void propagateDecommittedState(Span* destination, Span* source)
{
    destination->decommitted = source->decommitted;
}
#endif
inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
  ASSERT(n > 0);
  DLL_Remove(span);
  span->free = 0;
  Event(span, 'A', n);

  const int extra = static_cast<int>(span->length - n);
  ASSERT(extra >= 0);
  if (extra > 0) {
    Span* leftover = NewSpan(span->start + n, extra);
    leftover->free = 1;
    propagateDecommittedState(leftover, span);
    Event(leftover, 'S', extra);
    RecordSpan(leftover);

    // Place leftover span on appropriate free list
    SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
    Span* dst = released ? &listpair->returned : &listpair->normal;
    DLL_Prepend(dst, leftover);

    span->length = n;
    pagemap_.set(span->start + n - 1, span);
  }
}
#if !TCMALLOC_TRACK_DECOMMITED_SPANS
static ALWAYS_INLINE void mergeDecommittedStates(Span*, Span*) { }
#else
static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
{
    if (other->decommitted)
        destination->decommitted = true;
}
#endif
inline void TCMalloc_PageHeap::Delete(Span* span) {
  ASSERT(Check());
  ASSERT(!span->free);
  ASSERT(span->length > 0);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start + span->length - 1) == span);
  span->sizeclass = 0;
#ifndef NO_TCMALLOC_SAMPLES
  span->sample = 0;
#endif

  // Coalesce -- we guarantee that "p" != 0, so no bounds checking
  // necessary. We do not bother resetting the stale pagemap
  // entries for the pieces we are merging together because we only
  // care about the pagemap entries for the boundaries.
  //
  // Note that the spans we merge into "span" may come out of
  // a "returned" list. For simplicity, we move these into the
  // "normal" list of the appropriate size class.
  const PageID p = span->start;
  const Length n = span->length;
  Span* prev = GetDescriptor(p-1);
  if (prev != NULL && prev->free) {
    // Merge preceding span into this span
    ASSERT(prev->start + prev->length == p);
    const Length len = prev->length;
    mergeDecommittedStates(span, prev);
    DLL_Remove(prev);
    DeleteSpan(prev);
    span->start -= len;
    span->length += len;
    pagemap_.set(span->start, span);
    Event(span, 'L', len);
  }
  Span* next = GetDescriptor(p+n);
  if (next != NULL && next->free) {
    // Merge next span into this span
    ASSERT(next->start == p+n);
    const Length len = next->length;
    mergeDecommittedStates(span, next);
    DLL_Remove(next);
    DeleteSpan(next);
    span->length += len;
    pagemap_.set(span->start + span->length - 1, span);
    Event(span, 'R', len);
  }

  Event(span, 'D', span->length);
  span->free = 1;
  if (span->length < kMaxPages) {
    DLL_Prepend(&free_[span->length].normal, span);
  } else {
    DLL_Prepend(&large_.normal, span);
  }
  free_pages_ += n;

  IncrementalScavenge(n);
  ASSERT(Check());
}
void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
  // Fast path; not yet time to release memory
  scavenge_counter_ -= n;
  if (scavenge_counter_ >= 0) return;  // Not yet time to scavenge

  static const size_t kDefaultReleaseDelay = 64;

  // Find index of free list to scavenge
  size_t index = scavenge_index_ + 1;
  for (size_t i = 0; i < kMaxPages+1; i++) {
    if (index > kMaxPages) index = 0;
    SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
    if (!DLL_IsEmpty(&slist->normal)) {
      // Release the last span on the normal portion of this list
      Span* s = slist->normal.prev;
      DLL_Remove(s);
      TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                             static_cast<size_t>(s->length << kPageShift));
#if TCMALLOC_TRACK_DECOMMITED_SPANS
      s->decommitted = true;
#endif
      DLL_Prepend(&slist->returned, s);

      scavenge_counter_ = std::max<size_t>(16UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));
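      // Worked example (editor's note): with kDefaultReleaseDelay == 64 and
      // free_pages_ == 2048, the new counter is
      // max(16, min(64, 64 - 2048/64)) == 32, so larger free lists shorten
      // the delay, floored at 16 pages.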
      if (index == kMaxPages && !DLL_IsEmpty(&slist->normal))
        scavenge_index_ = index - 1;
      else
        scavenge_index_ = index;
      return;
    }
    index++;
  }

  // Nothing to scavenge, delay for a while
  scavenge_counter_ = kDefaultReleaseDelay;
}
void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
  // Associate span object with all interior pages as well
  ASSERT(!span->free);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start+span->length-1) == span);
  Event(span, 'C', sc);
  span->sizeclass = static_cast<unsigned int>(sc);
  for (Length i = 1; i < span->length-1; i++) {
    pagemap_.set(span->start+i, span);
  }
}
#ifdef WTF_CHANGES
size_t TCMalloc_PageHeap::ReturnedBytes() const {
    size_t result = 0;
    for (unsigned s = 0; s < kMaxPages; s++) {
        const int r_length = DLL_Length(&free_[s].returned);
        unsigned r_pages = s * r_length;
        result += r_pages << kPageShift;
    }

    for (Span* s = large_.returned.next; s != &large_.returned; s = s->next)
        result += s->length << kPageShift;
    return result;
}
#endif
#ifndef WTF_CHANGES
static double PagesToMB(uint64_t pages) {
  return (pages << kPageShift) / 1048576.0;
}
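// Illustrative conversion (assuming kPageShift == 12):
// PagesToMB(256) == (256 << 12) / 1048576.0 == 1.0 MB.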
void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
  int nonempty_sizes = 0;
  for (int s = 0; s < kMaxPages; s++) {
    if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
      nonempty_sizes++;
    }
  }
  out->printf("------------------------------------------------\n");
  out->printf("PageHeap: %d sizes; %6.1f MB free\n",
              nonempty_sizes, PagesToMB(free_pages_));
  out->printf("------------------------------------------------\n");
  uint64_t total_normal = 0;
  uint64_t total_returned = 0;
  for (int s = 0; s < kMaxPages; s++) {
    const int n_length = DLL_Length(&free_[s].normal);
    const int r_length = DLL_Length(&free_[s].returned);
    if (n_length + r_length > 0) {
      uint64_t n_pages = s * n_length;
      uint64_t r_pages = s * r_length;
      total_normal += n_pages;
      total_returned += r_pages;
      out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
                  "; unmapped: %6.1f MB; %6.1f MB cum\n",
                  s,
                  (n_length + r_length),
                  PagesToMB(n_pages + r_pages),
                  PagesToMB(total_normal + total_returned),
                  PagesToMB(r_pages),
                  PagesToMB(total_returned));
    }
  }

  uint64_t n_pages = 0;
  uint64_t r_pages = 0;
  int n_spans = 0;
  int r_spans = 0;
  out->printf("Normal large spans:\n");
  for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
    out->printf("   [ %6" PRIuS " pages ] %6.1f MB\n",
                s->length, PagesToMB(s->length));
    n_pages += s->length;
    n_spans++;
  }
  out->printf("Unmapped large spans:\n");
  for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
    out->printf("   [ %6" PRIuS " pages ] %6.1f MB\n",
                s->length, PagesToMB(s->length));
    r_pages += s->length;
    r_spans++;
  }
  total_normal += n_pages;
  total_returned += r_pages;
  out->printf(">255   large * %6u spans ~ %6.1f MB; %6.1f MB cum"
              "; unmapped: %6.1f MB; %6.1f MB cum\n",
              (n_spans + r_spans),
              PagesToMB(n_pages + r_pages),
              PagesToMB(total_normal + total_returned),
              PagesToMB(r_pages),
              PagesToMB(total_returned));
}
#endif
bool TCMalloc_PageHeap::GrowHeap(Length n) {
  ASSERT(kMaxPages >= kMinSystemAlloc);
  if (n > kMaxValidPages) return false;
  Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
  size_t actual_size;
  void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
  if (ptr == NULL) {
    if (n < ask) {
      // Try growing just "n" pages
      ask = n;
      ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
    }
    if (ptr == NULL) return false;
  }
  ask = actual_size >> kPageShift;

  uint64_t old_system_bytes = system_bytes_;
  system_bytes_ += (ask << kPageShift);
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  ASSERT(p > 0);

  // If we have already a lot of pages allocated, just pre allocate a bunch of
  // memory for the page map. This prevents fragmentation by pagemap metadata
  // when a program keeps allocating and freeing large blocks.

  if (old_system_bytes < kPageMapBigAllocationThreshold
      && system_bytes_ >= kPageMapBigAllocationThreshold) {
    pagemap_.PreallocateMoreMemory();
  }

  // Make sure pagemap_ has entries for all of the new pages.
  // Plus ensure one before and one after so coalescing code
  // does not need bounds-checking.
  if (pagemap_.Ensure(p-1, ask+2)) {
    // Pretend the new area is allocated and then Delete() it to
    // cause any necessary coalescing to occur.
    //
    // We do not adjust free_pages_ here since Delete() will do it for us.
    Span* span = NewSpan(p, ask);
    RecordSpan(span);
    Delete(span);
    ASSERT(Check());
    return true;
  } else {
    // We could not allocate memory within "pagemap_"
    // TODO: Once we can return memory to the system, return the new span
    return false;
  }
}
bool TCMalloc_PageHeap::Check() {
  ASSERT(free_[0].normal.next == &free_[0].normal);
  ASSERT(free_[0].returned.next == &free_[0].returned);
  CheckList(&large_.normal, kMaxPages, 1000000000);
  CheckList(&large_.returned, kMaxPages, 1000000000);
  for (Length s = 1; s < kMaxPages; s++) {
    CheckList(&free_[s].normal, s, s);
    CheckList(&free_[s].returned, s, s);
  }
  return true;
}
#if ASSERT_DISABLED
bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) {
  return true;
}
#else
bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
  for (Span* s = list->next; s != list; s = s->next) {
    CHECK_CONDITION(s->free);
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(s->length <= max_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
  }
  return true;
}
#endif
static void ReleaseFreeList(Span* list, Span* returned) {
  // Walk backwards through list so that when we push these
  // spans on the "returned" list, we preserve the order.
  while (!DLL_IsEmpty(list)) {
    Span* s = list->prev;
    DLL_Remove(s);
    DLL_Prepend(returned, s);
    TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                           static_cast<size_t>(s->length << kPageShift));
  }
}

void TCMalloc_PageHeap::ReleaseFreePages() {
  for (Length s = 0; s < kMaxPages; s++) {
    ReleaseFreeList(&free_[s].normal, &free_[s].returned);
  }
  ReleaseFreeList(&large_.normal, &large_.returned);
  ASSERT(Check());
}
//-------------------------------------------------------------------
// Free list
//-------------------------------------------------------------------

class TCMalloc_ThreadCache_FreeList {
 private:
  void*    list_;       // Linked list of nodes
  uint16_t length_;     // Current length
  uint16_t lowater_;    // Low water mark for list length
 public:
  void Init() {
    list_ = NULL;
    length_ = 0;
    lowater_ = 0;
  }

  // Return current length of list
  int length() const {
    return length_;
  }

  // Is list empty?
  bool empty() const {
    return list_ == NULL;
  }

  // Low-water mark management
  int lowwatermark() const { return lowater_; }
  void clear_lowwatermark() { lowater_ = length_; }

  ALWAYS_INLINE void Push(void* ptr) {
    SLL_Push(&list_, ptr);
    length_++;
  }

  void PushRange(int N, void *start, void *end) {
    SLL_PushRange(&list_, start, end);
    length_ = length_ + static_cast<uint16_t>(N);
  }

  void PopRange(int N, void **start, void **end) {
    SLL_PopRange(&list_, N, start, end);
    ASSERT(length_ >= N);
    length_ = length_ - static_cast<uint16_t>(N);
    if (length_ < lowater_) lowater_ = length_;
  }

  ALWAYS_INLINE void* Pop() {
    ASSERT(list_ != NULL);
    length_--;
    if (length_ < lowater_) lowater_ = length_;
    return SLL_Pop(&list_);
  }
#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
  {
      for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
          finder.visit(nextObject);
  }
#endif
};
//-------------------------------------------------------------------
// Data kept per thread
//-------------------------------------------------------------------

class TCMalloc_ThreadCache {
 private:
  typedef TCMalloc_ThreadCache_FreeList FreeList;
#if COMPILER(MSVC)
  typedef DWORD ThreadIdentifier;
#else
  typedef pthread_t ThreadIdentifier;
#endif

  size_t        size_;                  // Combined size of data
  ThreadIdentifier tid_;                // Which thread owns it
  bool          in_setspecific_;        // Called pthread_setspecific?
  FreeList      list_[kNumClasses];     // Array indexed by size-class

  // We sample allocations, biased by the size of the allocation
  uint32_t      rnd_;                   // Cheap random number generator
  size_t        bytes_until_sample_;    // Bytes until we sample next
  // Allocate a new heap. REQUIRES: pageheap_lock is held.
  static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);

  // Use only as pthread thread-specific destructor function.
  static void DestroyThreadCache(void* ptr);

 public:
  // All ThreadCache objects are kept in a linked list (for stats collection)
  TCMalloc_ThreadCache* next_;
  TCMalloc_ThreadCache* prev_;

  void Init(ThreadIdentifier tid);
  void Cleanup();

  // Accessors (mostly just for printing stats)
  int freelist_length(size_t cl) const { return list_[cl].length(); }

  // Total byte size in cache
  size_t Size() const { return size_; }

  void* Allocate(size_t size);
  void Deallocate(void* ptr, size_t size_class);

  void FetchFromCentralCache(size_t cl, size_t allocationSize);
  void ReleaseToCentralCache(size_t cl, int N);
  void Scavenge();
  void Print() const;

  // Record allocation of "k" bytes. Return true iff allocation
  // should be sampled
  bool SampleAllocation(size_t k);

  // Pick next sampling point
  void PickNextSample(size_t k);

  static void                  InitModule();
  static void                  InitTSD();
  static TCMalloc_ThreadCache* GetThreadHeap();
  static TCMalloc_ThreadCache* GetCache();
  static TCMalloc_ThreadCache* GetCacheIfPresent();
  static TCMalloc_ThreadCache* CreateCacheIfNecessary();
  static void                  DeleteCache(TCMalloc_ThreadCache* heap);
  static void                  BecomeIdle();
  static void                  RecomputeThreadCacheSize();

#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
  {
      for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
          list_[sizeClass].enumerateFreeObjects(finder, reader);
  }
#endif
};
//-------------------------------------------------------------------
// Data kept per size-class in central cache
//-------------------------------------------------------------------

class TCMalloc_Central_FreeList {
 public:
  void Init(size_t cl);

  // These methods all do internal locking.

  // Insert the specified range into the central freelist. N is the number of
  // elements in the range.
  void InsertRange(void *start, void *end, int N);

  // Returns the actual number of fetched elements into N.
  void RemoveRange(void **start, void **end, int *N);

  // Returns the number of free objects in cache.
  size_t length() {
    SpinLockHolder h(&lock_);
    return counter_;
  }

  // Returns the number of free objects in the transfer cache.
  int tc_length() {
    SpinLockHolder h(&lock_);
    return used_slots_ * num_objects_to_move[size_class_];
  }
#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
  {
    for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0))
      ASSERT(!span->objects);

    ASSERT(!nonempty_.objects);
    static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);

    Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
    Span* remoteSpan = nonempty_.next;

    for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) {
      for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
        finder.visit(nextObject);
    }
  }
#endif
 private:
  // REQUIRES: lock_ is held
  // Remove object from cache and return.
  // Return NULL if no free entries in cache.
  void* FetchFromSpans();

  // REQUIRES: lock_ is held
  // Remove object from cache and return. Fetches
  // from pageheap if cache is empty. Only returns
  // NULL on allocation failure.
  void* FetchFromSpansSafe();

  // REQUIRES: lock_ is held
  // Release a linked list of objects to spans.
  // May temporarily release lock_.
  void ReleaseListToSpans(void *start);

  // REQUIRES: lock_ is held
  // Release an object to spans.
  // May temporarily release lock_.
  void ReleaseToSpans(void* object);

  // REQUIRES: lock_ is held
  // Populate cache by fetching from the page heap.
  // May temporarily release lock_.
  void Populate();

  // REQUIRES: lock is held.
  // Tries to make room for a TCEntry. If the cache is full it will try to
  // expand it at the cost of some other cache size. Return false if there is
  // no space.
  bool MakeCacheSpace();

  // REQUIRES: lock_ for locked_size_class is held.
  // Picks a "random" size class to steal TCEntry slot from. In reality it
  // just iterates over the sizeclasses but does so without taking a lock.
  // Returns true on success.
  // May temporarily lock a "random" size class.
  static bool EvictRandomSizeClass(size_t locked_size_class, bool force);

  // REQUIRES: lock_ is *not* held.
  // Tries to shrink the Cache. If force is true it will release objects to
  // spans if it allows it to shrink the cache. Return false if it failed to
  // shrink the cache. Decrements cache_size_ on success.
  // May temporarily take lock_. If it takes lock_, the locked_size_class
  // lock is released to keep the thread from holding two size class locks
  // concurrently, which could lead to a deadlock.
  bool ShrinkCache(int locked_size_class, bool force);
1916 // This lock protects all the data members. cached_entries and cache_size_
1917 // may be looked at without holding the lock.
1920 // We keep linked lists of empty and non-empty spans.
1921 size_t size_class_
; // My size class
1922 Span empty_
; // Dummy header for list of empty spans
1923 Span nonempty_
; // Dummy header for list of non-empty spans
1924 size_t counter_
; // Number of free objects in cache entry
1926 // Here we reserve space for TCEntry cache slots. Since one size class can
1927 // end up getting all the TCEntries quota in the system we just preallocate
1928 // sufficient number of entries here.
1929 TCEntry tc_slots_
[kNumTransferEntries
];
1931 // Number of currently used cached entries in tc_slots_. This variable is
1932 // updated under a lock but can be read without one.
1933 int32_t used_slots_
;
1934 // The current number of slots for this size class. This is an
1935 // adaptive value that is increased if there is lots of traffic
1936 // on a given size class.
1937 int32_t cache_size_
;
// Pad each CentralCache object to multiple of 64 bytes
class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
 private:
  char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
};
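// A worked example of the padding arithmetic above (the size is
// illustrative, not taken from any particular build): if
// sizeof(TCMalloc_Central_FreeList) were 200, then 200 % 64 == 8, so pad_
// is (64 - 8) % 64 == 56 bytes and each padded object occupies 256 bytes.
// The outer "% 64" only matters when the size is already a multiple of 64,
// turning a useless 64-byte pad into 0.  Keeping each free list (and its
// lock) on its own cache lines avoids false sharing between threads that
// hit different size classes.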
//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------

// Central cache -- a collection of free-lists, one per size-class.
// We have a separate lock per free-list to reduce contention.
static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
// Page-level allocator
static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;

#if PLATFORM(ARM)
static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)] __attribute__((aligned));
#else
static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];
#endif
static bool phinited = false;
// Avoid extra level of indirection by making "pageheap" be just an alias
// of pageheap_memory.
typedef union {
    void* m_memory;
    TCMalloc_PageHeap* m_pageHeap;
} PageHeapUnion;

static inline TCMalloc_PageHeap* getPageHeap()
{
    PageHeapUnion u = { &pageheap_memory[0] };
    return u.m_pageHeap;
}

#define pageheap getPageHeap()
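// Note on the union above: a plain reinterpret_cast from
// &pageheap_memory[0] would express the same conversion, but routing it
// through a union makes the type punning explicit and avoids compiler
// warnings about it.  getPageHeap() reduces to the address of the static
// buffer, so the "pageheap" macro is expected to cost nothing at runtime.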
// If TLS is available, we also store a copy
// of the per-thread object in a __thread variable
// since __thread variables are faster to read
// than pthread_getspecific().  We still need
// pthread_setspecific() because __thread
// variables provide no way to run cleanup
// code when a thread is destroyed.
#ifdef HAVE_TLS
static __thread TCMalloc_ThreadCache *threadlocal_heap;
#endif

// Thread-specific key.  Initialization here is somewhat tricky
// because some Linux startup code invokes malloc() before it
// is in a good enough state to handle pthread_keycreate().
// Therefore, we use TSD keys only after tsd_inited is set to true.
// Until then, we use a slow path to get the heap object.
static bool tsd_inited = false;
static pthread_key_t heap_key;
#if COMPILER(MSVC)
DWORD tlsIndex = TLS_OUT_OF_INDEXES;
#endif
static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
{
    // Still do pthread_setspecific even when using MSVC fast TLS, to
    // benefit from the delete callback.
    pthread_setspecific(heap_key, heap);

#if COMPILER(MSVC)
    TlsSetValue(tlsIndex, heap);
#endif
}
// Allocator for thread heaps
static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;

// Linked list of heap objects.  Protected by pageheap_lock.
static TCMalloc_ThreadCache* thread_heaps = NULL;
static int thread_heap_count = 0;

// Overall thread cache size.  Protected by pageheap_lock.
static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;

// Global per-thread cache size.  Writes are protected by
// pageheap_lock.  Reads are done without any locking, which should be
// fine as long as size_t can be written atomically and we don't place
// invariants between this variable and other pieces of state.
static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
//-------------------------------------------------------------------
// Central cache implementation
//-------------------------------------------------------------------

void TCMalloc_Central_FreeList::Init(size_t cl) {
  lock_.Init();
  size_class_ = cl;
  DLL_Init(&empty_);
  DLL_Init(&nonempty_);
  counter_ = 0;

  cache_size_ = 1;
  used_slots_ = 0;
  ASSERT(cache_size_ <= kNumTransferEntries);
}
void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
  while (start) {
    void *next = SLL_Next(start);
    ReleaseToSpans(start);
    start = next;
  }
}
ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);
  ASSERT(span != NULL);
  ASSERT(span->refcount > 0);

  // If span is empty, move it to non-empty list
  if (span->objects == NULL) {
    DLL_Remove(span);
    DLL_Prepend(&nonempty_, span);
    Event(span, 'N', 0);
  }

  // The following check is expensive, so it is disabled by default
  if (false) {
    // Check that object does not occur in list
    unsigned got = 0;
    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
      ASSERT(p != object);
      got++;
    }
    ASSERT(got + span->refcount ==
           (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
  }

  counter_++;
  span->refcount--;
  if (span->refcount == 0) {
    Event(span, '#', 0);
    counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
    DLL_Remove(span);

    // Release central list lock while operating on pageheap
    lock_.Unlock();
    {
      SpinLockHolder h(&pageheap_lock);
      pageheap->Delete(span);
    }
    lock_.Lock();
  } else {
    *(reinterpret_cast<void**>(object)) = span->objects;
    span->objects = object;
  }
}
ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
    size_t locked_size_class, bool force) {
  static int race_counter = 0;
  int t = race_counter++;  // Updated without a lock, but who cares.
  if (t >= static_cast<int>(kNumClasses)) {
    while (t >= static_cast<int>(kNumClasses)) {
      t -= kNumClasses;
    }
    race_counter = t;
  }
  ASSERT(t >= 0);
  ASSERT(t < static_cast<int>(kNumClasses));
  if (t == static_cast<int>(locked_size_class)) return false;
  return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
}
bool TCMalloc_Central_FreeList::MakeCacheSpace() {
  // Is there room in the cache?
  if (used_slots_ < cache_size_) return true;
  // Check if we can expand this cache?
  if (cache_size_ == kNumTransferEntries) return false;
  // Ok, we'll try to grab an entry from some other size class.
  if (EvictRandomSizeClass(size_class_, false) ||
      EvictRandomSizeClass(size_class_, true)) {
    // Succeeded in evicting, we're going to make our cache larger.
    cache_size_++;
    return true;
  }
  return false;
}
class LockInverter {
 private:
  SpinLock *held_, *temp_;
 public:
  inline explicit LockInverter(SpinLock* held, SpinLock *temp)
    : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
  inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
};
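// Usage sketch for LockInverter (illustrative only; ShrinkCache below is
// the one real call site):
//
//   // lock A is held on entry; lock B is not.
//   {
//       LockInverter li(&lockA, &lockB);  // unlocks A, then locks B
//       ... touch state guarded by lock B ...
//   }   // destructor unlocks B, then re-acquires A
//
// Since at most one size-class lock is held at any instant, no lock-order
// cycle between two size classes can form.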
bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
  // Start with a quick check without taking a lock.
  if (cache_size_ == 0) return false;
  // We don't evict from a full cache unless we are 'forcing'.
  if (force == false && used_slots_ == cache_size_) return false;

  // Grab lock, but first release the other lock held by this thread.  We use
  // the lock inverter to ensure that we never hold two size class locks
  // concurrently.  That can create a deadlock because there is no well
  // defined nesting order.
  LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
  ASSERT(used_slots_ <= cache_size_);
  ASSERT(0 <= cache_size_);
  if (cache_size_ == 0) return false;
  if (used_slots_ == cache_size_) {
    if (force == false) return false;
    // ReleaseListToSpans releases the lock, so we have to make all the
    // updates to the central list before calling it.
    cache_size_--;
    used_slots_--;
    ReleaseListToSpans(tc_slots_[used_slots_].head);
    return true;
  }
  cache_size_--;
  return true;
}
void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
  SpinLockHolder h(&lock_);
  if (N == num_objects_to_move[size_class_] &&
      MakeCacheSpace()) {
    int slot = used_slots_++;
    ASSERT(slot >= 0);
    ASSERT(slot < kNumTransferEntries);
    TCEntry *entry = &tc_slots_[slot];
    entry->head = start;
    entry->tail = end;
    return;
  }
  ReleaseListToSpans(start);
}
void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
  int num = *N;
  ASSERT(num > 0);

  SpinLockHolder h(&lock_);
  if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
    int slot = --used_slots_;
    ASSERT(slot >= 0);
    TCEntry *entry = &tc_slots_[slot];
    *start = entry->head;
    *end = entry->tail;
    return;
  }

  // TODO: Prefetch multiple TCEntries?
  void *tail = FetchFromSpansSafe();
  if (!tail) {
    // We are completely out of memory.
    *start = *end = NULL;
    *N = 0;
    return;
  }

  SLL_SetNext(tail, NULL);
  void *head = tail;
  int count = 1;
  while (count < num) {
    void *t = FetchFromSpans();
    if (!t) break;
    SLL_Push(&head, t);
    count++;
  }
  *start = head;
  *end = tail;
  *N = count;
}
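// Note on the fast path above: when the caller asks for exactly
// num_objects_to_move[size_class_] objects -- the batch size thread caches
// use -- a whole TCEntry (a prelinked chain with known head and tail) is
// handed over without touching any span free list.  Only odd-sized
// requests, or an empty transfer cache, fall through to the slower
// FetchFromSpans() loop below.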
void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
  void *t = FetchFromSpans();
  if (!t) {
    Populate();
    t = FetchFromSpans();
  }
  return t;
}
void* TCMalloc_Central_FreeList::FetchFromSpans() {
  if (DLL_IsEmpty(&nonempty_)) return NULL;
  Span* span = nonempty_.next;

  ASSERT(span->objects != NULL);
  ASSERT_SPAN_COMMITTED(span);
  span->refcount++;
  void* result = span->objects;
  span->objects = *(reinterpret_cast<void**>(result));
  if (span->objects == NULL) {
    // Move to empty list
    DLL_Remove(span);
    DLL_Prepend(&empty_, span);
    Event(span, 'E', 0);
  }
  counter_--;
  return result;
}
// Fetch memory from the system and add to the central cache freelist.
ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
  // Release central list lock while operating on pageheap
  lock_.Unlock();
  const size_t npages = class_to_pages[size_class_];

  Span* span;
  {
    SpinLockHolder h(&pageheap_lock);
    span = pageheap->New(npages);
    if (span) pageheap->RegisterSizeClass(span, size_class_);
  }
  if (span == NULL) {
    MESSAGE("allocation failed: %d\n", errno);
    lock_.Lock();
    return;
  }
  ASSERT_SPAN_COMMITTED(span);
  ASSERT(span->length == npages);
  // Cache sizeclass info eagerly.  Locking is not necessary.
  // (Instead of being eager, we could just replace any stale info
  // about this span, but that seems to be no better in practice.)
  for (size_t i = 0; i < npages; i++) {
    pageheap->CacheSizeClass(span->start + i, size_class_);
  }

  // Split the block into pieces and add to the free-list
  // TODO: coloring of objects to avoid cache conflicts?
  void** tail = &span->objects;
  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
  char* limit = ptr + (npages << kPageShift);
  const size_t size = ByteSizeForClass(size_class_);
  int num = 0;
  char* nptr;
  while ((nptr = ptr + size) <= limit) {
    *tail = ptr;
    tail = reinterpret_cast<void**>(ptr);
    ptr = nptr;
    num++;
  }
  ASSERT(ptr <= limit);
  *tail = NULL;
  span->refcount = 0; // No sub-object in use yet

  // Add span to list of non-empty spans
  lock_.Lock();
  DLL_Prepend(&nonempty_, span);
  counter_ += num;
}
//-------------------------------------------------------------------
// TCMalloc_ThreadCache implementation
//-------------------------------------------------------------------
inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
  if (bytes_until_sample_ < k) {
    PickNextSample(k);
    return true;
  } else {
    bytes_until_sample_ -= k;
    return false;
  }
}
void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
  size_ = 0;
  next_ = NULL;
  prev_ = NULL;
  tid_  = tid;
  in_setspecific_ = false;
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    list_[cl].Init();
  }

  // Initialize RNG -- run it for a bit to get to good values
  bytes_until_sample_ = 0;
  rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
  for (int i = 0; i < 100; i++) {
    PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
  }
}
void TCMalloc_ThreadCache::Cleanup() {
  // Put unused memory back into central cache
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    if (list_[cl].length() > 0) {
      ReleaseToCentralCache(cl, list_[cl].length());
    }
  }
}
ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
  ASSERT(size <= kMaxSize);
  const size_t cl = SizeClass(size);
  FreeList* list = &list_[cl];
  size_t allocationSize = ByteSizeForClass(cl);
  if (list->empty()) {
    FetchFromCentralCache(cl, allocationSize);
    if (list->empty()) return NULL;
  }
  size_ -= allocationSize;
  return list->Pop();
}
inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
  size_ += ByteSizeForClass(cl);
  FreeList* list = &list_[cl];
  list->Push(ptr);
  // If enough data is free, put back into central cache
  if (list->length() > kMaxFreeListLength) {
    ReleaseToCentralCache(cl, num_objects_to_move[cl]);
  }
  if (size_ >= per_thread_cache_size) Scavenge();
}
// Remove some objects of class "cl" from central cache and add to thread heap
ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
  int fetch_count = num_objects_to_move[cl];
  void *start, *end;
  central_cache[cl].RemoveRange(&start, &end, &fetch_count);
  list_[cl].PushRange(fetch_count, start, end);
  size_ += allocationSize * fetch_count;
}
// Remove some objects of class "cl" from thread heap and add to central cache
inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
  ASSERT(N > 0);
  FreeList* src = &list_[cl];
  if (N > src->length()) N = src->length();
  size_ -= N*ByteSizeForClass(cl);

  // We return prepackaged chains of the correct size to the central cache.
  // TODO: Use the same format internally in the thread caches?
  int batch_size = num_objects_to_move[cl];
  while (N > batch_size) {
    void *tail, *head;
    src->PopRange(batch_size, &head, &tail);
    central_cache[cl].InsertRange(head, tail, batch_size);
    N -= batch_size;
  }
  void *tail, *head;
  src->PopRange(N, &head, &tail);
  central_cache[cl].InsertRange(head, tail, N);
}
// Release idle memory to the central cache
inline void TCMalloc_ThreadCache::Scavenge() {
  // If the low-water mark for the free list is L, it means we would
  // not have had to allocate anything from the central cache even if
  // we had reduced the free list size by L.  We aim to get closer to
  // that situation by dropping L/2 nodes from the free list.  This
  // may not release much memory, but if so we will call scavenge again
  // pretty soon and the low-water marks will be high on that call.
  //int64 start = CycleClock::Now();

  for (size_t cl = 0; cl < kNumClasses; cl++) {
    FreeList* list = &list_[cl];
    const int lowmark = list->lowwatermark();
    if (lowmark > 0) {
      const int drop = (lowmark > 1) ? lowmark/2 : 1;
      ReleaseToCentralCache(cl, drop);
    }
    list->clear_lowwatermark();
  }

  //int64 finish = CycleClock::Now();
  //CycleTimer ct;
  //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
}
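// A worked example of the heuristic above (numbers are illustrative): if a
// free list's low-water mark since the last scavenge is L == 8, the list
// held at least 8 surplus objects the whole time, so drop == 4 of them are
// returned to the central cache.  A list that was fully drained at some
// point (L == 0) gives up nothing.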
void TCMalloc_ThreadCache::PickNextSample(size_t k) {
  // Make next "random" number
  // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
  static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
  uint32_t r = rnd_;
  rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);

  // Next point is "rnd_ % (sample_period)".  I.e., average
  // increment is "sample_period/2".
  const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
  static int last_flag_value = -1;

  if (flag_value != last_flag_value) {
    SpinLockHolder h(&sample_period_lock);
    int i;
    for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
      if (primes_list[i] >= flag_value) {
        break;
      }
    }
    sample_period = primes_list[i];
    last_flag_value = flag_value;
  }

  bytes_until_sample_ += rnd_ % sample_period;

  if (k > (static_cast<size_t>(-1) >> 2)) {
    // If the user has asked for a huge allocation then it is possible
    // for the code below to loop infinitely.  Just return (note that
    // this throws off the sampling accuracy somewhat, but a user who
    // is allocating more than 1G of memory at a time can live with a
    // minor inaccuracy in profiling of small allocations, and also
    // would rather not wait for the loop below to terminate).
    return;
  }

  while (bytes_until_sample_ < k) {
    // Increase bytes_until_sample_ by enough average sampling periods
    // (sample_period >> 1) to allow us to sample past the current
    // allocation.
    bytes_until_sample_ += (sample_period >> 1);
  }

  bytes_until_sample_ -= k;
}
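// How the generator above advances, on one concrete step (illustrative):
// the update rnd_ = (r << 1) ^ ((int32_t(r) >> 31) & kPoly) shifts left by
// one bit and, only when the old top bit was set (the arithmetic shift then
// yields all ones), XORs in the feedback taps kPoly.  For r == 0x80000000:
// r << 1 == 0, int32_t(r) >> 31 == 0xFFFFFFFF, so the new value is
// 0 ^ kPoly == 0x00400007.  This is a Galois-style 32-bit LFSR; with a
// primitive polynomial it cycles through all 2^32 - 1 nonzero states.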
void TCMalloc_ThreadCache::InitModule() {
  // There is a slight potential race here because of double-checked
  // locking idiom.  However, as long as the program does a small
  // allocation before switching to multi-threaded mode, we will be
  // fine.  We increase the chances of doing such a small allocation
  // by doing one in the constructor of the module_enter_exit_hook
  // object declared below.
  SpinLockHolder h(&pageheap_lock);
  if (!phinited) {
    InitSizeClasses();
    threadheap_allocator.Init();
    span_allocator.Init();
    span_allocator.New(); // Reduce cache conflicts
    span_allocator.New(); // Reduce cache conflicts
    stacktrace_allocator.Init();
    DLL_Init(&sampled_objects);
    for (size_t i = 0; i < kNumClasses; ++i) {
      central_cache[i].Init(i);
    }
    // Construct the page heap in the reserved static storage.
    new ((void*)pageheap_memory) TCMalloc_PageHeap;
    phinited = 1;
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
    FastMallocZone::init();
#endif
  }
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
  // Create the heap and add it to the linked list
  TCMalloc_ThreadCache *heap = threadheap_allocator.New();
  heap->Init(tid);
  heap->next_ = thread_heaps;
  heap->prev_ = NULL;
  if (thread_heaps != NULL) thread_heaps->prev_ = heap;
  thread_heaps = heap;
  thread_heap_count++;
  RecomputeThreadCacheSize();
  return heap;
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
#ifdef HAVE_TLS
  // __thread is faster, but only when the kernel supports it
  if (KernelSupportsTLS())
    return threadlocal_heap;
#elif COMPILER(MSVC)
  return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
#else
  return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
#endif
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
  TCMalloc_ThreadCache* ptr = NULL;
  if (!tsd_inited) {
    InitModule();
  } else {
    ptr = GetThreadHeap();
  }
  if (ptr == NULL) ptr = CreateCacheIfNecessary();
  return ptr;
}
// In deletion paths, we do not try to create a thread-cache.  This is
// because we may be in the thread destruction code and may have
// already cleaned up the cache for this thread.
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
  if (!tsd_inited) return NULL;
  void* const p = GetThreadHeap();
  return reinterpret_cast<TCMalloc_ThreadCache*>(p);
}
void TCMalloc_ThreadCache::InitTSD() {
  ASSERT(!tsd_inited);
  pthread_key_create(&heap_key, DestroyThreadCache);
#if COMPILER(MSVC)
  tlsIndex = TlsAlloc();
#endif
  tsd_inited = true;

#if !COMPILER(MSVC)
  // We may have used a fake pthread_t for the main thread.  Fix it.
  pthread_t zero;
  memset(&zero, 0, sizeof(zero));
#endif
#ifndef WTF_CHANGES
  SpinLockHolder h(&pageheap_lock);
#else
  ASSERT(pageheap_lock.IsHeld());
#endif
  for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
    if (h->tid_ == 0) {
      h->tid_ = GetCurrentThreadId();
    }
#else
    if (pthread_equal(h->tid_, zero)) {
      h->tid_ = pthread_self();
    }
#endif
  }
}
TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
  // Initialize per-thread data if necessary
  TCMalloc_ThreadCache* heap = NULL;
  {
    SpinLockHolder h(&pageheap_lock);

#if COMPILER(MSVC)
    DWORD me;
    if (!tsd_inited) {
      me = 0;
    } else {
      me = GetCurrentThreadId();
    }
#else
    // Early on in glibc's life, we cannot even call pthread_self()
    pthread_t me;
    if (!tsd_inited) {
      memset(&me, 0, sizeof(me));
    } else {
      me = pthread_self();
    }
#endif

    // This may be a recursive malloc call from pthread_setspecific()
    // In that case, the heap for this thread has already been created
    // and added to the linked list.  So we search for that first.
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
      if (h->tid_ == me) {
#else
      if (pthread_equal(h->tid_, me)) {
#endif
        heap = h;
        break;
      }
    }

    if (heap == NULL) heap = NewHeap(me);
  }

  // We call pthread_setspecific() outside the lock because it may
  // call malloc() recursively.  The recursive call will never get
  // here again because it will find the already allocated heap in the
  // linked list of heaps.
  if (!heap->in_setspecific_ && tsd_inited) {
    heap->in_setspecific_ = true;
    setThreadHeap(heap);
  }
  return heap;
}
void TCMalloc_ThreadCache::BecomeIdle() {
  if (!tsd_inited) return;              // No caches yet
  TCMalloc_ThreadCache* heap = GetThreadHeap();
  if (heap == NULL) return;             // No thread cache to remove
  if (heap->in_setspecific_) return;    // Do not disturb the active caller

  heap->in_setspecific_ = true;
  pthread_setspecific(heap_key, NULL);
#ifdef HAVE_TLS
  // Also update the copy in __thread
  threadlocal_heap = NULL;
#endif
  heap->in_setspecific_ = false;
  if (GetThreadHeap() == heap) {
    // Somehow heap got reinstated by a recursive call to malloc
    // from pthread_setspecific.  We give up in this case.
    return;
  }

  // We can now get rid of the heap
  DeleteCache(heap);
}
void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
  // Note that "ptr" cannot be NULL since pthread promises not
  // to invoke the destructor on NULL values, but for safety,
  // we check anyway.
  if (ptr == NULL) return;
#ifdef HAVE_TLS
  // Prevent fast path of GetThreadHeap() from returning heap.
  threadlocal_heap = NULL;
#endif
  DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
}
void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
  // Remove all memory from heap
  heap->Cleanup();

  // Remove from linked list
  SpinLockHolder h(&pageheap_lock);
  if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
  if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
  if (thread_heaps == heap) thread_heaps = heap->next_;
  thread_heap_count--;
  RecomputeThreadCacheSize();

  threadheap_allocator.Delete(heap);
}
void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
  // Divide available space across threads
  int n = thread_heap_count > 0 ? thread_heap_count : 1;
  size_t space = overall_thread_cache_size / n;

  // Limit to allowed range
  if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
  if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;

  per_thread_cache_size = space;
}
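// A worked example (the constants vary by build and are assumed here for
// illustration): if overall_thread_cache_size were 16MB and 32 threads are
// live, each thread may cache 512KB before Scavenge() kicks in; with only
// 2 threads the 8MB quotient would be clamped down to kMaxThreadCacheSize,
// and with thousands of threads the tiny quotient is clamped up to
// kMinThreadCacheSize.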
void TCMalloc_ThreadCache::Print() const {
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    MESSAGE("      %5" PRIuS " : %4d len; %4d lo\n",
            ByteSizeForClass(cl),
            list_[cl].length(),
            list_[cl].lowwatermark());
  }
}
// Extract interesting stats
struct TCMallocStats {
  uint64_t system_bytes;        // Bytes alloced from system
  uint64_t thread_bytes;        // Bytes in thread caches
  uint64_t central_bytes;       // Bytes in central cache
  uint64_t transfer_bytes;      // Bytes in central transfer cache
  uint64_t pageheap_bytes;      // Bytes in page heap
  uint64_t metadata_bytes;      // Bytes alloced for metadata
};
// Get stats into "r".  Also get per-size-class counts if class_count != NULL
static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
  r->central_bytes = 0;
  r->transfer_bytes = 0;
  for (int cl = 0; cl < kNumClasses; ++cl) {
    const int length = central_cache[cl].length();
    const int tc_length = central_cache[cl].tc_length();
    r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
    r->transfer_bytes +=
      static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
    if (class_count) class_count[cl] = length + tc_length;
  }

  // Add stats from per-thread heaps
  r->thread_bytes = 0;
  { // scope
    SpinLockHolder h(&pageheap_lock);
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
      r->thread_bytes += h->Size();
      if (class_count) {
        for (size_t cl = 0; cl < kNumClasses; ++cl) {
          class_count[cl] += h->freelist_length(cl);
        }
      }
    }
  }

  { // scope
    SpinLockHolder h(&pageheap_lock);
    r->system_bytes = pageheap->SystemBytes();
    r->metadata_bytes = metadata_system_bytes;
    r->pageheap_bytes = pageheap->FreeBytes();
  }
}
// WRITE stats to "out"
static void DumpStats(TCMalloc_Printer* out, int level) {
  TCMallocStats stats;
  uint64_t class_count[kNumClasses];
  ExtractStats(&stats, (level >= 2 ? class_count : NULL));

  if (level >= 2) {
    out->printf("------------------------------------------------\n");
    uint64_t cumulative = 0;
    for (int cl = 0; cl < kNumClasses; ++cl) {
      if (class_count[cl] > 0) {
        uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
        cumulative += class_bytes;
        out->printf("class %3d [ %8" PRIuS " bytes ] : "
                    "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
                    cl, ByteSizeForClass(cl),
                    class_count[cl],
                    class_bytes / 1048576.0,
                    cumulative / 1048576.0);
      }
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->Dump(out);
  }

  const uint64_t bytes_in_use = stats.system_bytes
                                - stats.pageheap_bytes
                                - stats.central_bytes
                                - stats.transfer_bytes
                                - stats.thread_bytes;

  out->printf("------------------------------------------------\n"
              "MALLOC: %12" PRIu64 " Heap size\n"
              "MALLOC: %12" PRIu64 " Bytes in use by application\n"
              "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
              "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
              "MALLOC: %12" PRIu64 " Spans in use\n"
              "MALLOC: %12" PRIu64 " Thread heaps in use\n"
              "MALLOC: %12" PRIu64 " Metadata allocated\n"
              "------------------------------------------------\n",
              stats.system_bytes,
              bytes_in_use,
              stats.pageheap_bytes,
              stats.central_bytes,
              stats.transfer_bytes,
              stats.thread_bytes,
              uint64_t(span_allocator.inuse()),
              uint64_t(threadheap_allocator.inuse()),
              stats.metadata_bytes);
}
static void PrintStats(int level) {
  const int kBufferSize = 16 << 10;
  char* buffer = new char[kBufferSize];
  TCMalloc_Printer printer(buffer, kBufferSize);
  DumpStats(&printer, level);
  write(STDERR_FILENO, buffer, strlen(buffer));
  delete[] buffer;
}
static void** DumpStackTraces() {
  // Count how much space we need
  int needed_slots = 0;
  {
    SpinLockHolder h(&pageheap_lock);
    for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
      StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
      needed_slots += 3 + stack->depth;
    }
    needed_slots += 100;            // Slop in case sample grows
    needed_slots += needed_slots/8; // An extra 12.5% slop
  }

  void** result = new void*[needed_slots];
  if (result == NULL) {
    MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
            needed_slots);
    return NULL;
  }

  SpinLockHolder h(&pageheap_lock);
  int used_slots = 0;
  for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
    ASSERT(used_slots < needed_slots);  // Need to leave room for terminator
    StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
    if (used_slots + 3 + stack->depth >= needed_slots) {
      // No more room
      break;
    }

    result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
    result[used_slots+1] = reinterpret_cast<void*>(stack->size);
    result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
    for (int d = 0; d < stack->depth; d++) {
      result[used_slots+3+d] = stack->stack[d];
    }
    used_slots += 3 + stack->depth;
  }
  result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
  return result;
}
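// The returned buffer is a flat sequence of variable-length records, one
// per sampled allocation:
//
//   result[i+0]            object count for the record (always 1 here)
//   result[i+1]            size of the sampled allocation, in bytes
//   result[i+2]            depth d of the captured stack
//   result[i+3 .. i+2+d]   the d program counters
//
// followed by a single 0 slot as terminator.  A consumer (e.g. a heap
// profiler reading these via MallocExtension) walks records until it hits
// the 0 count.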
// TCMalloc's support for extra malloc interfaces
class TCMallocImplementation : public MallocExtension {
 public:
  virtual void GetStats(char* buffer, int buffer_length) {
    ASSERT(buffer_length > 0);
    TCMalloc_Printer printer(buffer, buffer_length);

    // Print level one stats unless lots of space is available
    if (buffer_length < 10000) {
      DumpStats(&printer, 1);
    } else {
      DumpStats(&printer, 2);
    }
  }
  virtual void** ReadStackTraces() {
    return DumpStackTraces();
  }
  virtual bool GetNumericProperty(const char* name, size_t* value) {
    ASSERT(name != NULL);

    if (strcmp(name, "generic.current_allocated_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes
               - stats.thread_bytes
               - stats.central_bytes
               - stats.pageheap_bytes;
      return true;
    }

    if (strcmp(name, "generic.heap_size") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
      // We assume that bytes in the page heap are not fragmented too
      // badly, and are therefore available for allocation.
      SpinLockHolder l(&pageheap_lock);
      *value = pageheap->FreeBytes();
      return true;
    }

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(&pageheap_lock);
      *value = overall_thread_cache_size;
      return true;
    }

    if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.thread_bytes;
      return true;
    }

    return false;
  }
  virtual bool SetNumericProperty(const char* name, size_t value) {
    ASSERT(name != NULL);

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      // Clip the value to a reasonable range
      if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
      if (value > (1<<30)) value = (1<<30);     // Limit to 1GB

      SpinLockHolder l(&pageheap_lock);
      overall_thread_cache_size = static_cast<size_t>(value);
      TCMalloc_ThreadCache::RecomputeThreadCacheSize();
      return true;
    }

    return false;
  }
  virtual void MarkThreadIdle() {
    TCMalloc_ThreadCache::BecomeIdle();
  }

  virtual void ReleaseFreeMemory() {
    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
  }
};
// The constructor allocates an object to ensure that initialization
// runs before main(), and therefore we do not have a chance to become
// multi-threaded before initialization.  We also create the TSD key
// here.  Presumably by the time this constructor runs, glibc is in
// good enough shape to handle pthread_key_create().
//
// The constructor also takes the opportunity to tell STL to use
// tcmalloc.  We want to do this early, before construct time, so
// all user STL allocations go through tcmalloc (which works really
// well for STL).
//
// The destructor prints stats when the program exits.
class TCMallocGuard {
 public:
  TCMallocGuard() {
#ifdef HAVE_TLS  // this is true if the cc/ld/libc combo support TLS
    // Check whether the kernel also supports TLS (needs to happen at runtime)
    CheckIfKernelSupportsTLS();
#endif
#ifdef WIN32                    // patch the windows VirtualAlloc, etc.
    PatchWindowsFunctions();    // defined in windows/patch_functions.cc
#endif
    free(malloc(1));
    TCMalloc_ThreadCache::InitTSD();
    free(malloc(1));
    MallocExtension::Register(new TCMallocImplementation);
  }

  ~TCMallocGuard() {
    const char* env = getenv("MALLOCSTATS");
    if (env != NULL) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);
    }
#ifdef WIN32
    UnpatchWindowsFunctions();
#endif
  }
};

static TCMallocGuard module_enter_exit_hook;
//-------------------------------------------------------------------
// Helpers for the exported routines below
//-------------------------------------------------------------------
static Span* DoSampledAllocation(size_t size) {

  // Grab the stack trace outside the heap lock
  StackTrace tmp;
  tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
  tmp.size = size;

  SpinLockHolder h(&pageheap_lock);
  // Allocate span
  Span *span = pageheap->New(pages(size == 0 ? 1 : size));
  if (span == NULL) {
    return NULL;
  }

  // Allocate stack trace
  StackTrace *stack = stacktrace_allocator.New();
  if (stack == NULL) {
    // Sampling failed because of lack of memory
    return span;
  }

  *stack = tmp;
  span->sample = 1;
  span->objects = stack;
  DLL_Prepend(&sampled_objects, span);

  return span;
}
static inline bool CheckCachedSizeClass(void *ptr) {
  PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cached_value = pageheap->GetSizeClassIfCached(p);
  return cached_value == 0 ||
      cached_value == pageheap->GetDescriptor(p)->sizeclass;
}
static inline void* CheckedMallocResult(void *result)
{
  ASSERT(result == 0 || CheckCachedSizeClass(result));
  return result;
}
static inline void* SpanToMallocResult(Span *span) {
  ASSERT_SPAN_COMMITTED(span);
  pageheap->CacheSizeClass(span->start, 0);
  return
      CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
}
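// Illustration of the page arithmetic used here and in do_free() (the page
// size is an assumption for the example, not a fixed constant): with
// kPageShift == 12, i.e. 4K pages, a span whose start PageID is 0x12345
// maps to the user pointer 0x12345 << 12 == 0x12345000, and conversely
// reinterpret_cast<uintptr_t>(ptr) >> 12 recovers the PageID for pagemap
// lookups.  Caching sizeclass 0 for the start page marks it as a large,
// page-level allocation.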
template <bool crashOnFailure>
static ALWAYS_INLINE void* do_malloc(size_t size) {
  void* ret = NULL;

  ASSERT(!isForbidden());

  // The following call forces module initialization
  TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
  if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
    Span* span = DoSampledAllocation(size);
    if (span != NULL)
      ret = SpanToMallocResult(span);
  } else if (size > kMaxSize) {
    // Use page-level allocator
    SpinLockHolder h(&pageheap_lock);
    Span* span = pageheap->New(pages(size));
    if (span != NULL)
      ret = SpanToMallocResult(span);
  } else {
    // The common case, and also the simplest.  This just pops the
    // size-appropriate freelist, after replenishing it if it's empty.
    ret = CheckedMallocResult(heap->Allocate(size));
  }

  if (!ret) {
    if (crashOnFailure) // This branch should be optimized out by the compiler.
      CRASH();
    errno = ENOMEM;
  }
  return ret;
}
static ALWAYS_INLINE void do_free(void* ptr) {
  if (ptr == NULL) return;
  ASSERT(pageheap != NULL);  // Should not call free() before malloc()
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  Span* span = NULL;
  size_t cl = pageheap->GetSizeClassIfCached(p);

  if (cl == 0) {
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
  }
  if (cl != 0) {
#ifndef NO_TCMALLOC_SAMPLES
    ASSERT(!pageheap->GetDescriptor(p)->sample);
#endif
    TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
    if (heap != NULL) {
      heap->Deallocate(ptr, cl);
    } else {
      // Delete directly into central cache
      SLL_SetNext(ptr, NULL);
      central_cache[cl].InsertRange(ptr, ptr, 1);
    }
  } else {
    SpinLockHolder h(&pageheap_lock);
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span != NULL && span->start == p);
#ifndef NO_TCMALLOC_SAMPLES
    if (span->sample) {
      DLL_Remove(span);
      stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
      span->objects = NULL;
    }
#endif
    pageheap->Delete(span);
  }
}
// For use by exported routines below that want specific alignments
//
// Note: this code can be slow, and can significantly fragment memory.
// The expectation is that memalign/posix_memalign/valloc/pvalloc will
// not be invoked very often.  This requirement simplifies our
// implementation and allows us to tune for expected allocation
// patterns.
static void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  ASSERT(align > 0);
  if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();

  // Allocate at least one byte to avoid boundary conditions below
  if (size == 0) size = 1;

  if (size <= kMaxSize && align < kPageSize) {
    // Search through acceptable size classes looking for one with
    // enough alignment.  This depends on the fact that
    // InitSizeClasses() currently produces several size classes that
    // are aligned at powers of two.  We will waste time and space if
    // we miss in the size class array, but that is deemed acceptable
    // since memalign() should be used rarely.
    size_t cl = SizeClass(size);
    while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
      cl++;
    }
    if (cl < kNumClasses) {
      TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
      return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
    }
  }

  // We will allocate directly from the page heap
  SpinLockHolder h(&pageheap_lock);

  if (align <= kPageSize) {
    // Any page-level allocation will be fine
    // TODO: We could put the rest of this page in the appropriate
    // TODO: cache but it does not seem worth it.
    Span* span = pageheap->New(pages(size));
    return span == NULL ? NULL : SpanToMallocResult(span);
  }

  // Allocate extra pages and carve off an aligned portion
  const Length alloc = pages(size + align);
  Span* span = pageheap->New(alloc);
  if (span == NULL) return NULL;

  // Skip starting portion so that we end up aligned
  Length skip = 0;
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
    skip++;
  }
  ASSERT(skip < alloc);
  if (skip > 0) {
    Span* rest = pageheap->Split(span, skip);
    pageheap->Delete(span);
    span = rest;
  }

  // Skip trailing portion that we do not need to return
  const Length needed = pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    Span* trailer = pageheap->Split(span, needed);
    pageheap->Delete(trailer);
  }
  return SpanToMallocResult(span);
}
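// A worked example of the carve-off above (illustrative numbers, assuming
// 4K pages): do_memalign(16384, 4096) allocates pages(4096 + 16384) == 5
// pages.  If the span happens to start at page 3 (address 0x3000), "skip"
// advances one page to reach the 16K-aligned page 4 (address 0x4000); the
// one-page prefix and the three-page trailer beyond needed == 1 page are
// both split off and returned to the page heap.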
// Helpers for use by exported routines below:

static inline void do_malloc_stats() {
  PrintStats(1);
}

static inline int do_mallopt(int, int) {
  return 1;     // Indicates error
}
#ifdef HAVE_STRUCT_MALLINFO  // mallinfo isn't defined on freebsd, for instance
static inline struct mallinfo do_mallinfo() {
  TCMallocStats stats;
  ExtractStats(&stats, NULL);

  // Just some of the fields are filled in.
  struct mallinfo info;
  memset(&info, 0, sizeof(info));

  // Unfortunately, the struct contains "int" fields, so some of the
  // size values will be truncated.
  info.arena     = static_cast<int>(stats.system_bytes);
  info.fsmblks   = static_cast<int>(stats.thread_bytes
                                    + stats.central_bytes
                                    + stats.transfer_bytes);
  info.fordblks  = static_cast<int>(stats.pageheap_bytes);
  info.uordblks  = static_cast<int>(stats.system_bytes
                                    - stats.thread_bytes
                                    - stats.central_bytes
                                    - stats.transfer_bytes
                                    - stats.pageheap_bytes);

  return info;
}
#endif
//-------------------------------------------------------------------
// Exported routines
//-------------------------------------------------------------------

// CAVEAT: The code structure below ensures that MallocHook methods are always
//         called from the stack frame of the invoked allocation function.
//         heap-checker.cc depends on this to start a stack trace from
//         the call to the (de)allocation function.

// Route the plain do_malloc calls below through the crashOnFailure template
// parameter of the enclosing function templates.
#define do_malloc do_malloc<crashOnFailure>

template <bool crashOnFailure>
void* malloc(size_t);

void* fastMalloc(size_t size)
{
    return malloc<true>(size);
}

void* tryFastMalloc(size_t size)
{
    return malloc<false>(size);
}
template <bool crashOnFailure>
ALWAYS_INLINE void* malloc(size_t size) {
  void* result = do_malloc(size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
void free(void* ptr) {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}
template <bool crashOnFailure>
void* calloc(size_t, size_t);

void* fastCalloc(size_t n, size_t elem_size)
{
    return calloc<true>(n, elem_size);
}

void* tryFastCalloc(size_t n, size_t elem_size)
{
    return calloc<false>(n, elem_size);
}

template <bool crashOnFailure>
ALWAYS_INLINE void* calloc(size_t n, size_t elem_size) {
  const size_t totalBytes = n * elem_size;

  // Protect against overflow
  if (n > 1 && elem_size && (totalBytes / elem_size) != n)
    return 0;

  void* result = do_malloc(totalBytes);
  if (result != NULL) {
    memset(result, 0, totalBytes);
  }
  MallocHook::InvokeNewHook(result, totalBytes);
  return result;
}
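// Example of the overflow guard above (assuming a 32-bit size_t for the
// illustration): calloc(65537, 65537) computes totalBytes = 65537 * 65537,
// which wraps to 131073 instead of ~4GB.  Since 131073 / 65537 == 2, not
// 65537, the check detects the wrap and the call fails rather than
// returning a buffer far smaller than requested.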
// Since cfree isn't used anywhere, we don't compile it in.
#ifndef WTF_CHANGES
extern "C" void cfree(void* ptr) {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}
#endif
template <bool crashOnFailure>
void* realloc(void*, size_t);

void* fastRealloc(void* old_ptr, size_t new_size)
{
    return realloc<true>(old_ptr, new_size);
}

void* tryFastRealloc(void* old_ptr, size_t new_size)
{
    return realloc<false>(old_ptr, new_size);
}

template <bool crashOnFailure>
ALWAYS_INLINE void* realloc(void* old_ptr, size_t new_size) {
  if (old_ptr == NULL) {
    void* result = do_malloc(new_size);
    MallocHook::InvokeNewHook(result, new_size);
    return result;
  }
  if (new_size == 0) {
    MallocHook::InvokeDeleteHook(old_ptr);
    free(old_ptr);
    return NULL;
  }

  // Get the size of the old entry
  const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
  size_t cl = pageheap->GetSizeClassIfCached(p);
  Span *span = NULL;
  size_t old_size;
  if (cl == 0) {
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
  }
  if (cl != 0) {
    old_size = ByteSizeForClass(cl);
  } else {
    ASSERT(span != NULL);
    old_size = span->length << kPageShift;
  }

  // Reallocate if the new size is larger than the old size,
  // or if the new size is significantly smaller than the old size.
  if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
    // Need to reallocate
    void* new_ptr = do_malloc(new_size);
    if (new_ptr == NULL) {
      return NULL;
    }
    MallocHook::InvokeNewHook(new_ptr, new_size);
    memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
    MallocHook::InvokeDeleteHook(old_ptr);
    // We could use a variant of do_free() that leverages the fact
    // that we already know the sizeclass of old_ptr.  The benefit
    // would be small, so don't bother.
    do_free(old_ptr);
    return new_ptr;
  } else {
    return old_ptr;
  }
}
#ifdef WTF_CHANGES
#undef do_malloc
#else

static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;

static inline void* cpp_alloc(size_t size, bool nothrow) {
  for (;;) {
    void* p = do_malloc(size);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
      // If no new_handler is established, the allocation failed.
      if (!nh) {
        if (nothrow) return 0;
        throw std::bad_alloc();
      }
      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // if it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
        return p;
      }
    } else {  // allocation success
      return p;
    }
#endif
  }
}
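// The loop above implements the C++ new-handler protocol: on failure, look
// up the installed handler (by swapping it out and back, since C++98 has no
// portable "get"), give it one chance to release memory, then retry the
// allocation.  A missing handler, or one that throws bad_alloc, ends the
// loop -- either returning 0 (nothrow) or propagating the exception.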
void* operator new(size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}
void* operator new(size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}
) __THROW
{
3474 MallocHook::InvokeDeleteHook(p
);
3478 void operator delete(void* p
, const std::nothrow_t
&) __THROW
{
3479 MallocHook::InvokeDeleteHook(p
);
void* operator new[](size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void* operator new[](size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void operator delete[](void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

void operator delete[](void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
extern "C" void* memalign(size_t align, size_t size) __THROW {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
    __THROW {
  if (((align % sizeof(void*)) != 0) ||
      ((align & (align - 1)) != 0) ||
      (align == 0)) {
    return EINVAL;
  }

  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  if (result == NULL) {
    return ENOMEM;
  } else {
    *result_ptr = result;
    return 0;
  }
}
static size_t pagesize = 0;

extern "C" void* valloc(size_t size) __THROW {
  // Allocate page-aligned object of length >= size bytes
  if (pagesize == 0) pagesize = getpagesize();
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" void* pvalloc(size_t size) __THROW {
  // Round up size to a multiple of pagesize
  if (pagesize == 0) pagesize = getpagesize();
  size = (size + pagesize - 1) & ~(pagesize - 1);
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
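// Example of the rounding above (4096 is illustrative; the real value comes
// from getpagesize()): pvalloc(1) requests (1 + 4095) & ~4095 == 4096 bytes
// and pvalloc(4097) requests 8192, so the returned block always covers
// whole pages.  valloc(), by contrast, only aligns the start address and
// keeps the requested length.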
extern "C" void malloc_stats(void) {
  do_malloc_stats();
}

extern "C" int mallopt(int cmd, int value) {
  return do_mallopt(cmd, value);
}

#ifdef HAVE_STRUCT_MALLINFO
extern "C" struct mallinfo mallinfo(void) {
  return do_mallinfo();
}
#endif
//-------------------------------------------------------------------
// Some library routines on RedHat 9 allocate memory using malloc()
// and free it using __libc_free() (or vice-versa).  Since we provide
// our own implementations of malloc/free, we need to make sure that
// the __libc_XXX variants (defined as part of glibc) also point to
// the same implementations.
//-------------------------------------------------------------------

#if defined(__GLIBC__)
extern "C" {
# if defined(__GNUC__) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
  // Potentially faster variants that use the gcc alias extension.
  // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
# define ALIAS(x) __attribute__ ((weak, alias (x)))
  void* __libc_malloc(size_t size)              ALIAS("malloc");
  void  __libc_free(void* ptr)                  ALIAS("free");
  void* __libc_realloc(void* ptr, size_t size)  ALIAS("realloc");
  void* __libc_calloc(size_t n, size_t size)    ALIAS("calloc");
  void  __libc_cfree(void* ptr)                 ALIAS("cfree");
  void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
  void* __libc_valloc(size_t size)              ALIAS("valloc");
  void* __libc_pvalloc(size_t size)             ALIAS("pvalloc");
  int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
# undef ALIAS
# else   /* not __GNUC__ */
  // Portable wrappers
  void* __libc_malloc(size_t size)              { return malloc(size);       }
  void  __libc_free(void* ptr)                  { free(ptr);                 }
  void* __libc_realloc(void* ptr, size_t size)  { return realloc(ptr, size); }
  void* __libc_calloc(size_t n, size_t size)    { return calloc(n, size);    }
  void  __libc_cfree(void* ptr)                 { cfree(ptr);                }
  void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
  void* __libc_valloc(size_t size)              { return valloc(size);       }
  void* __libc_pvalloc(size_t size)             { return pvalloc(size);      }
  int __posix_memalign(void** r, size_t a, size_t s) {
    return posix_memalign(r, a, s);
  }
# endif  /* __GNUC__ */
}   // extern "C"
#endif   /* __GLIBC__ */
// Override __libc_memalign in libc on linux boxes specially.
// They have a bug in libc that causes them to (very rarely) allocate
// with __libc_memalign() yet deallocate with free() and the
// definitions above don't catch it.
// This function is an exception to the rule of calling MallocHook methods
// from the stack frame of the allocation function;
// heap-checker handles this special case explicitly.
static void *MemalignOverride(size_t align, size_t size, const void *caller)
    __THROW {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;

#endif  // #ifdef WTF_CHANGES / #else
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)

class FreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    HashSet<void*> m_freeObjects;

public:
    FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }

    void visit(void* ptr) { m_freeObjects.add(ptr); }
    bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
    size_t freeObjectCount() const { return m_freeObjects.size(); }

    void findFreeObjects(TCMalloc_ThreadCache* threadCache)
    {
        for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
            threadCache->enumerateFreeObjects(*this, m_reader);
    }

    void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
    {
        for (unsigned i = 0; i < numSizes; i++)
            centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
    }
};
class PageMapFreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    FreeObjectFinder& m_freeObjectFinder;

public:
    PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
        : m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    int visit(void* ptr) const
    {
        if (!ptr)
            return 1;

        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (span->free) {
            void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
            m_freeObjectFinder.visit(ptr);
        } else if (span->sizeclass) {
            // Walk the free list of the small-object span, keeping track of each object seen
            for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject)))
                m_freeObjectFinder.visit(nextObject);
        }
        return span->length;
    }
};
class PageMapMemoryUsageRecorder {
    task_t m_task;
    void* m_context;
    unsigned m_typeMask;
    vm_range_recorder_t* m_recorder;
    const RemoteMemoryReader& m_reader;
    const FreeObjectFinder& m_freeObjectFinder;
    mutable HashSet<void*> m_seenPointers;

public:
    PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
        : m_task(task)
        , m_context(context)
        , m_typeMask(typeMask)
        , m_recorder(recorder)
        , m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    int visit(void* ptr) const
    {
        if (!ptr)
            return 1;

        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (m_seenPointers.contains(ptr))
            return span->length;
        m_seenPointers.add(ptr);

        // Mark the memory used for the Span itself as an administrative region
        vm_range_t ptrRange = { reinterpret_cast<vm_address_t>(ptr), sizeof(Span) };
        if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
            (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, &ptrRange, 1);

        ptrRange.address = span->start << kPageShift;
        ptrRange.size = span->length * kPageSize;

        // Mark the memory region the span represents as candidates for containing pointers
        if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
            (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);

        if (!span->free && (m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
            // If it's an allocated large object span, mark it as in use
            if (span->sizeclass == 0 && !m_freeObjectFinder.isFreeObject(reinterpret_cast<void*>(ptrRange.address)))
                (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &ptrRange, 1);
            else if (span->sizeclass) {
                const size_t byteSize = ByteSizeForClass(span->sizeclass);
                unsigned totalObjects = (span->length << kPageShift) / byteSize;
                ASSERT(span->refcount <= totalObjects);
                char* ptr = reinterpret_cast<char*>(span->start << kPageShift);

                // Mark each allocated small object within the span as in use
                for (unsigned i = 0; i < totalObjects; i++) {
                    char* thisObject = ptr + (i * byteSize);
                    if (m_freeObjectFinder.isFreeObject(thisObject))
                        continue;

                    vm_range_t objectRange = { reinterpret_cast<vm_address_t>(thisObject), byteSize };
                    (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &objectRange, 1);
                }
            }
        }

        return span->length;
    }
};
kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
{
    RemoteMemoryReader memoryReader(task, reader);

    InitSizeClasses();

    FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
    TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
    TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
    TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);

    TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);

    FreeObjectFinder finder(memoryReader);
    finder.findFreeObjects(threadHeaps);
    finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);

    TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
    PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
    pageMap->visit(pageMapFinder, memoryReader);

    PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
    pageMap->visit(usageRecorder, memoryReader);

    return 0;
}
size_t FastMallocZone::size(malloc_zone_t*, const void*)
{
    return 0;
}

void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
{
    return 0;
}

void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
{
    return 0;
}

void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
{
    // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
    // is not in this zone.  When this happens, the pointer being freed was not allocated by any
    // zone so we need to print a useful error for the application developer.
    malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
}

void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
{
    return 0;
}
extern "C" {
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
    &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics };
}
FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches)
    : m_pageHeap(pageHeap)
    , m_threadHeaps(threadHeaps)
    , m_centralCaches(centralCaches)
{
    memset(&m_zone, 0, sizeof(m_zone));
    m_zone.zone_name = "JavaScriptCore FastMalloc";
    m_zone.size = &FastMallocZone::size;
    m_zone.malloc = &FastMallocZone::zoneMalloc;
    m_zone.calloc = &FastMallocZone::zoneCalloc;
    m_zone.realloc = &FastMallocZone::zoneRealloc;
    m_zone.free = &FastMallocZone::zoneFree;
    m_zone.valloc = &FastMallocZone::zoneValloc;
    m_zone.destroy = &FastMallocZone::zoneDestroy;
    m_zone.introspect = &jscore_fastmalloc_introspection;
    malloc_zone_register(&m_zone);
}
void FastMallocZone::init()
{
    static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache));
}
void releaseFastMallocFreeMemory()
{
    // Flush free pages in the current thread cache back to the page heap.
    // The low-water-mark mechanism in Scavenge() prevents a full return on
    // the first pass, so a second pass flushes everything.
    if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
        threadCache->Scavenge();
        threadCache->Scavenge();
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
}
FastMallocStatistics fastMallocStatistics()
{
    FastMallocStatistics statistics;
    {
        SpinLockHolder lockHolder(&pageheap_lock);
        statistics.heapSize = static_cast<size_t>(pageheap->SystemBytes());
        statistics.freeSizeInHeap = static_cast<size_t>(pageheap->FreeBytes());
        statistics.returnedSize = pageheap->ReturnedBytes();
        statistics.freeSizeInCaches = 0;
        for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache; threadCache = threadCache->next_)
            statistics.freeSizeInCaches += threadCache->Size();
    }
    for (unsigned cl = 0; cl < kNumClasses; ++cl) {
        const int length = central_cache[cl].length();
        const int tc_length = central_cache[cl].tc_length();
        statistics.freeSizeInCaches += ByteSizeForClass(cl) * (length + tc_length);
    }
    return statistics;
}

#endif // defined(WTF_CHANGES) && PLATFORM(DARWIN)

#endif // FORCE_SYSTEM_MALLOC