// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
// Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: Sanjay Ghemawat <opensource@google.com>
//
// A malloc that uses a per-thread cache to satisfy small malloc requests.
// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
//
// See doc/tcmalloc.html for a high-level
// description of how this malloc works.
//
// SYNCHRONIZATION
//  1. The thread-specific lists are accessed without acquiring any locks.
//     This is safe because each such list is only accessed by one thread.
//  2. We have a lock per central free-list, and hold it while manipulating
//     the central free list for a particular size.
//  3. The central page allocator is protected by "pageheap_lock".
//  4. The pagemap (which maps from page-number to descriptor),
//     can be read without holding any locks, and written while holding
//     the "pageheap_lock".
//  5. To improve performance, a subset of the information one can get
//     from the pagemap is cached in a data structure, pagemap_cache_,
//     that atomically reads and writes its entries.  This cache can be
//     read and written without locking.
//
//     This multi-threaded access to the pagemap is safe for fairly
//     subtle reasons.  We basically assume that when an object X is
//     allocated by thread A and deallocated by thread B, there must
//     have been appropriate synchronization in the handoff of object
//     X from thread A to thread B.  The same logic applies to pagemap_cache_.
//
// THE PAGEID-TO-SIZECLASS CACHE
// Hot PageID-to-sizeclass mappings are held by pagemap_cache_.  If this cache
// returns 0 for a particular PageID then that means "no information," not that
// the sizeclass is 0.  The cache may have stale information for pages that do
// not hold the beginning of any free()'able object.  Staleness is eliminated
// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
// do_memalign() for all other relevant pages.
//
// TODO: Bias reclamation to larger addresses
// TODO: implement mallinfo/mallopt
// TODO: Better testing
//
// 9/28/2003 (new page-level allocator replaces ptmalloc2):
// * malloc/free of small objects goes from ~300 ns to ~50 ns.
// * allocation of a reasonably complicated struct
//   goes from about 1100 ns to about 300 ns.
#include "FastMalloc.h"

#include "Assertions.h"

#if ENABLE(JSC_MULTIPLE_THREADS)
#include <pthread.h>
#endif

#ifndef NO_TCMALLOC_SAMPLES
#ifdef WTF_CHANGES
#define NO_TCMALLOC_SAMPLES
#endif
#endif

#if !defined(USE_SYSTEM_MALLOC) && defined(NDEBUG)
#define FORCE_SYSTEM_MALLOC 0
#else
#define FORCE_SYSTEM_MALLOC 1
#endif

// Use a background thread to periodically scavenge memory to release back to the system
// https://bugs.webkit.org/show_bug.cgi?id=27900: don't turn this on for Tiger until we have figured out why it caused a crash.
#define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 0
#if ENABLE(JSC_MULTIPLE_THREADS)
static pthread_key_t isForbiddenKey;
static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
static void initializeIsForbiddenKey()
{
    pthread_key_create(&isForbiddenKey, 0);
}

static bool isForbidden()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    return !!pthread_getspecific(isForbiddenKey);
}

void fastMallocForbid()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    pthread_setspecific(isForbiddenKey, &isForbiddenKey);
}

void fastMallocAllow()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    pthread_setspecific(isForbiddenKey, 0);
}

#else

static bool staticIsForbidden;
static bool isForbidden()
{
    return staticIsForbidden;
}

void fastMallocForbid()
{
    staticIsForbidden = true;
}

void fastMallocAllow()
{
    staticIsForbidden = false;
}
#endif // ENABLE(JSC_MULTIPLE_THREADS)
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)

namespace Internal {

void fastMallocMatchFailed(void*)
{
    CRASH();
}

} // namespace Internal

#endif
void* fastZeroedMalloc(size_t n)
{
    void* result = fastMalloc(n);
    memset(result, 0, n);
    return result;
}

void* tryFastZeroedMalloc(size_t n)
{
    void* result = tryFastMalloc(n);
    if (!result)
        return 0;
    memset(result, 0, n);
    return result;
}
#if FORCE_SYSTEM_MALLOC

#if !PLATFORM(WIN_OS)
void* tryFastMalloc(size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n)  // If overflow would occur...
        return 0;

    void* result = malloc(n + sizeof(AllocAlignmentInteger));
    if (!result)
        return 0;

    *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
    result = static_cast<AllocAlignmentInteger*>(result) + 1;
    return result;
#else
    return malloc(n);
#endif
}
void* fastMalloc(size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    void* result = tryFastMalloc(n);
#else
    void* result = malloc(n);
#endif

    if (!result)
        CRASH();
    return result;
}
void* tryFastCalloc(size_t n_elements, size_t element_size)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    size_t totalBytes = n_elements * element_size;
    if (n_elements > 1 && element_size && (totalBytes / element_size) != n_elements
        || (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes))
        return 0;

    totalBytes += sizeof(AllocAlignmentInteger);
    void* result = malloc(totalBytes);
    if (!result)
        return 0;

    memset(result, 0, totalBytes);
    *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
    result = static_cast<AllocAlignmentInteger*>(result) + 1;
    return result;
#else
    return calloc(n_elements, element_size);
#endif
}
void* fastCalloc(size_t n_elements, size_t element_size)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    void* result = tryFastCalloc(n_elements, element_size);
#else
    void* result = calloc(n_elements, element_size);
#endif

    if (!result)
        CRASH();
    return result;
}
void fastFree(void* p)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (!p)
        return;

    AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
    if (*header != Internal::AllocTypeMalloc)
        Internal::fastMallocMatchFailed(p);
    free(header);
#else
    free(p);
#endif
}
void* tryFastRealloc(void* p, size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (p) {
        if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n)  // If overflow would occur...
            return 0;
        AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
        if (*header != Internal::AllocTypeMalloc)
            Internal::fastMallocMatchFailed(p);
        void* result = realloc(header, n + sizeof(AllocAlignmentInteger));
        if (!result)
            return 0;

        // This should not be needed because the value is already there:
        // *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
        result = static_cast<AllocAlignmentInteger*>(result) + 1;
        return result;
    }

    return fastMalloc(n);
#else
    return realloc(p, n);
#endif
}
void* fastRealloc(void* p, size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    void* result = tryFastRealloc(p, n);
#else
    void* result = realloc(p, n);
#endif

    if (!result)
        CRASH();
    return result;
}
void releaseFastMallocFreeMemory() { }

FastMallocStatistics fastMallocStatistics()
{
    FastMallocStatistics statistics = { 0, 0, 0, 0 };
    return statistics;
}

// This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
// It will never be used in this case, so its type and value are less interesting than its presence.
extern "C" const int jscore_fastmalloc_introspection = 0;
#else // FORCE_SYSTEM_MALLOC

#if HAVE(STDINT_H)
#include <stdint.h>
#elif HAVE(INTTYPES_H)
#include <inttypes.h>
#else
#include <sys/types.h>
#endif

#include "AlwaysInline.h"
#include "Assertions.h"
#include "TCPackedCache.h"
#include "TCPageMap.h"
#include "TCSpinLock.h"
#include "TCSystemAlloc.h"

#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif

#if PLATFORM(DARWIN)
#include "MallocZoneSupport.h"
#include <wtf/HashSet.h>
#endif
// Calling pthread_getspecific through a global function pointer is faster than a normal
// call to the function on Mac OS X, and it's used in performance-critical code. So we
// use a function pointer. But that's not necessarily faster on other platforms, and we had
// problems with this technique on Windows, so we'll do this only on Mac OS X.
#if PLATFORM(DARWIN)
static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
#define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
#endif
#define DEFINE_VARIABLE(type, name, value, meaning) \
  namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead {  \
  type FLAGS_##name(value);                                \
  char FLAGS_no##name;                                                        \
  }                                                                           \
  using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name

#define DEFINE_int64(name, value, meaning) \
  DEFINE_VARIABLE(int64_t, name, value, meaning)

#define DEFINE_double(name, value, meaning) \
  DEFINE_VARIABLE(double, name, value, meaning)
#define malloc fastMalloc
#define calloc fastCalloc
#define free fastFree
#define realloc fastRealloc

#define MESSAGE LOG_ERROR
#define CHECK_CONDITION ASSERT
#if PLATFORM(DARWIN)
class TCMalloc_Central_FreeListPadded;
class TCMalloc_PageHeap;
class TCMalloc_ThreadCache;
template <typename T> class PageHeapAllocator;

class FastMallocZone {
public:
    static void init();

    static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
    static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
    static boolean_t check(malloc_zone_t*) { return true; }
    static void print(malloc_zone_t*, boolean_t) { }
    static void log(malloc_zone_t*, void*) { }
    static void forceLock(malloc_zone_t*) { }
    static void forceUnlock(malloc_zone_t*) { }
    static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }

private:
    FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
    static size_t size(malloc_zone_t*, const void*);
    static void* zoneMalloc(malloc_zone_t*, size_t);
    static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
    static void zoneFree(malloc_zone_t*, void*);
    static void* zoneRealloc(malloc_zone_t*, void*, size_t);
    static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
    static void zoneDestroy(malloc_zone_t*) { }

    malloc_zone_t m_zone;
    TCMalloc_PageHeap* m_pageHeap;
    TCMalloc_ThreadCache** m_threadHeaps;
    TCMalloc_Central_FreeListPadded* m_centralCaches;
    PageHeapAllocator<Span>* m_spanAllocator;
    PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
};

#endif
// This #ifdef should almost never be set.  Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip)  (0)
#else
# include <google/stacktrace.h>
#endif
// Even if we have support for thread-local storage in the compiler
// and linker, the OS may not support it.  We need to check that at
// runtime.  Right now, we have to keep a manual set of "bad" OSes.
#if defined(HAVE_TLS)
  static bool kernel_supports_tls = false;      // be conservative
  static inline bool KernelSupportsTLS() {
    return kernel_supports_tls;
  }
# if !HAVE_DECL_UNAME    // if too old for uname, probably too old for TLS
    static void CheckIfKernelSupportsTLS() {
      kernel_supports_tls = false;
    }
# else
#   include <sys/utsname.h>    // DECL_UNAME checked for <sys/utsname.h> too
    static void CheckIfKernelSupportsTLS() {
      struct utsname buf;
      if (uname(&buf) != 0) {   // should be impossible
        MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
        kernel_supports_tls = false;
      } else if (strcasecmp(buf.sysname, "linux") == 0) {
        // The linux case: the first kernel to support TLS was 2.6.0
        if (buf.release[0] < '2' && buf.release[1] == '.')    // 0.x or 1.x
          kernel_supports_tls = false;
        else if (buf.release[0] == '2' && buf.release[1] == '.' &&
                 buf.release[2] >= '0' && buf.release[2] < '6' &&
                 buf.release[3] == '.')                       // 2.0 - 2.5
          kernel_supports_tls = false;
        else
          kernel_supports_tls = true;
      } else {        // some other kernel, we'll be optimistic
        kernel_supports_tls = true;
      }
      // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
    }
# endif  // HAVE_DECL_UNAME
#endif   // HAVE_TLS
// __THROW is defined in glibc systems.  It means, counter-intuitively,
// "This function will never throw an exception."  It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.
#ifndef __THROW    // I guess we're not on a glibc system
# define __THROW   // __THROW is just an optimization, so ok to make it ""
#endif
//-------------------------------------------------------------------
// Configuration
//-------------------------------------------------------------------
// Not all possible combinations of the following parameters make
// sense.  In particular, if kMaxSize increases, you may have to
// increase kNumClasses as well.
static const size_t kPageShift  = 12;
static const size_t kPageSize   = 1 << kPageShift;
static const size_t kMaxSize    = 8u * kPageSize;
static const size_t kAlignShift = 3;
static const size_t kAlignment  = 1 << kAlignShift;
static const size_t kNumClasses = 68;

// Allocates a big block of memory for the pagemap once we reach more than
// 128MB
static const size_t kPageMapBigAllocationThreshold = 128 << 20;

// Minimum number of pages to fetch from system at a time.  Must be
// significantly bigger than kBlockSize to amortize system-call
// overhead, and also to reduce external fragmentation.  Also, we
// should keep this value big because various incarnations of Linux
// have small limits on the number of mmap() regions per
// address-space.
static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);

// Number of objects to move between a per-thread list and a central
// list in one shot.  We want this to be not too small so we can
// amortize the lock overhead for accessing the central list.  Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
static int num_objects_to_move[kNumClasses];

// Maximum length we allow a per-thread free-list to have before we
// move objects from it into the corresponding central free-list.  We
// want this big to avoid locking the central free-list too often.  It
// should not hurt to make this list somewhat big because the
// scavenging code will shrink it down when its contents are not in use.
static const int kMaxFreeListLength = 256;

// Lower and upper bounds on the per-thread cache sizes
static const size_t kMinThreadCacheSize = kMaxSize * 2;
static const size_t kMaxThreadCacheSize = 512 * 1024;

// Default bound on the total amount of thread caches
static const size_t kDefaultOverallThreadCacheSize = 16 << 20;

// For all span-lengths < kMaxPages we keep an exact-size list.
// REQUIRED: kMaxPages >= kMinSystemAlloc;
static const size_t kMaxPages = kMinSystemAlloc;
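
// Worked example (illustrative, not part of the original source): with the
// defaults above these constants resolve to a 4 KB page, a 32 KB small-object
// ceiling, 8-byte alignment, and a 1 MB (256-page) minimum system allocation.
// The checks below only document the arithmetic and are compiled out.
#if 0
COMPILE_ASSERT(kPageSize == 4096, page_size_is_4KB);
COMPILE_ASSERT(kMaxSize == 32768, max_small_object_is_32KB);
COMPILE_ASSERT(kAlignment == 8, small_objects_are_8_byte_aligned);
COMPILE_ASSERT(kMinSystemAlloc == 256, min_system_alloc_is_256_pages_1MB);
COMPILE_ASSERT(kMaxPages == kMinSystemAlloc, exact_size_lists_cover_up_to_256_pages);
#endif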
/* The smallest prime > 2^n */
static int primes_list[] = {
    // Small values might cause high rates of sampling
    // and hence commented out.
    // 2, 5, 11, 17, 37, 67, 131, 257,
    // 521, 1031, 2053, 4099, 8209, 16411,
    32771, 65537, 131101, 262147, 524309, 1048583,
    2097169, 4194319, 8388617, 16777259, 33554467 };
// Twice the approximate gap between sampling actions.
// I.e., we take one sample approximately once every
//      tcmalloc_sample_parameter/2
// bytes of allocation, i.e., ~ once every 128KB.
// Must be a prime number.
#ifdef NO_TCMALLOC_SAMPLES
DEFINE_int64(tcmalloc_sample_parameter, 0,
             "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
static size_t sample_period = 0;
#else
DEFINE_int64(tcmalloc_sample_parameter, 262147,
             "Twice the approximate gap between sampling actions."
             " Must be a prime number. Otherwise will be rounded up to a "
             " larger prime number");
static size_t sample_period = 262147;
#endif

// Protects sample_period above
static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
// Parameters for controlling how fast memory is returned to the OS.

DEFINE_double(tcmalloc_release_rate, 1,
              "Rate at which we release unused memory to the system.  "
              "Zero means we never release memory back to the system.  "
              "Increase this flag to return memory faster; decrease it "
              "to return memory slower.  Reasonable rates are in the "
              "range [0,10]");
//-------------------------------------------------------------------
// Mapping from size to size_class and vice versa
//-------------------------------------------------------------------

// Sizes <= 1024 have an alignment >= 8.  So for such sizes we have an
// array indexed by ceil(size/8).  Sizes > 1024 have an alignment >= 128.
// So for these larger sizes we have an array indexed by ceil(size/128).
//
// We flatten both logical arrays into one physical array and use
// arithmetic to compute an appropriate index.  The constants used by
// ClassIndex() were selected to make the flattening work.
//
// Examples:
//   Size       Expression                      Index
//   -------------------------------------------------------
//   ...
//   1024       (1024 + 7) / 8                  128
//   1025       (1025 + 127 + (120<<7)) / 128   129
//   ...
//   32768      (32768 + 127 + (120<<7)) / 128  376
static const size_t kMaxSmallSize = 1024;
static const int shift_amount[2] = { 3, 7 };  // For divides by 8 or 128
static const int add_amount[2] = { 7, 127 + (120 << 7) };
static unsigned char class_array[377];

// Compute index of the class_array[] entry for a given size
static inline int ClassIndex(size_t s) {
  const int i = (s > kMaxSmallSize);
  return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
}
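
// Worked example (illustrative, not part of the original source): for the
// largest "fine-grained" size, ClassIndex(1024) = (1024 + 7) >> 3 = 128, and
// for the next size up, ClassIndex(1025) = (1025 + 127 + (120 << 7)) >> 7 = 129,
// so the two logical tables meet without a hole and the last used slot of
// class_array[377] is 376. The check below is compiled out.
#if 0
static void ClassIndexExample() {
    ASSERT(ClassIndex(1024) == 128);
    ASSERT(ClassIndex(1025) == 129);
    ASSERT(ClassIndex(kMaxSize) == 376);
}
#endif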
// Mapping from size class to max size storable in that class
static size_t class_to_size[kNumClasses];

// Mapping from size class to number of pages to allocate at a time
static size_t class_to_pages[kNumClasses];
// TransferCache is used to cache transfers of num_objects_to_move[size_class]
// back and forth between thread caches and the central cache for a given size
// class.
struct TCEntry {
  void *head;  // Head of chain of objects.
  void *tail;  // Tail of chain of objects.
};

// A central cache freelist can have anywhere from 0 to kNumTransferEntries
// slots to put linked list chains into.  To keep memory usage bounded the total
// number of TCEntries across size classes is fixed.  Currently each size
// class is initially given one TCEntry which also means that the maximum any
// one class can have is kNumClasses.
static const int kNumTransferEntries = kNumClasses;
// Note: the following only works for "n"s that fit in 32-bits, but
// that is fine since we only use it for small sizes.
static inline int LgFloor(size_t n) {
  int log = 0;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    size_t x = n >> shift;
    if (x != 0) {
      n = x;
      log += shift;
    }
  }
  ASSERT(n == 1);
  return log;
}
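
// Worked example (illustrative, not part of the original source): LgFloor
// peels off shifts of 16, 8, 4, 2, 1 and accumulates the exponent of the
// highest set bit, so LgFloor(1) == 0, LgFloor(8) == 3, LgFloor(1023) == 9.
// The check below is compiled out.
#if 0
static void LgFloorExample() {
    ASSERT(LgFloor(1) == 0);
    ASSERT(LgFloor(8) == 3);
    ASSERT(LgFloor(1023) == 9);
}
#endif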
// Some very basic linked list functions for dealing with using void * as
// storage.

static inline void *SLL_Next(void *t) {
  return *(reinterpret_cast<void**>(t));
}

static inline void SLL_SetNext(void *t, void *n) {
  *(reinterpret_cast<void**>(t)) = n;
}

static inline void SLL_Push(void **list, void *element) {
  SLL_SetNext(element, *list);
  *list = element;
}

static inline void *SLL_Pop(void **list) {
  void *result = *list;
  *list = SLL_Next(*list);
  return result;
}

// Remove N elements from a linked list to which head points.  head will be
// modified to point to the new head.  start and end will point to the first
// and last nodes of the range.  Note that end will point to NULL after this
// function is called.
static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
  if (N == 0) {
    *start = NULL;
    *end = NULL;
    return;
  }

  void *tmp = *head;
  for (int i = 1; i < N; ++i) {
    tmp = SLL_Next(tmp);
  }

  *start = *head;
  *end = tmp;
  *head = SLL_Next(tmp);
  // Unlink range from list.
  SLL_SetNext(tmp, NULL);
}

static inline void SLL_PushRange(void **head, void *start, void *end) {
  if (!start) return;
  SLL_SetNext(end, *head);
  *head = start;
}

static inline size_t SLL_Size(void *head) {
  int count = 0;
  while (head) {
    count++;
    head = SLL_Next(head);
  }
  return count;
}
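
// Usage sketch (illustrative, not part of the original source): free objects
// are chained through their own first word, so any buffer of at least
// sizeof(void*) bytes can live on such a list. The block below is compiled out.
#if 0
static void SLL_Example() {
    void* freeList = NULL;              // empty list
    char a[16], b[16];
    SLL_Push(&freeList, a);             // list: a
    SLL_Push(&freeList, b);             // list: b -> a
    ASSERT(SLL_Size(freeList) == 2);
    ASSERT(SLL_Pop(&freeList) == b);    // LIFO order
    ASSERT(SLL_Pop(&freeList) == a);
    ASSERT(freeList == NULL);
}
#endif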
// Setup helper functions.

static ALWAYS_INLINE size_t SizeClass(size_t size) {
  return class_array[ClassIndex(size)];
}

// Get the byte-size for a specified class
static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
  return class_to_size[cl];
}
static int NumMoveSize(size_t size) {
  if (size == 0) return 0;
  // Use approx 64k transfers between thread and central caches.
  int num = static_cast<int>(64.0 * 1024.0 / size);
  if (num < 2) num = 2;
  // Clamp well below kMaxFreeListLength to avoid ping pong between central
  // and thread caches.
  if (num > static_cast<int>(0.8 * kMaxFreeListLength))
    num = static_cast<int>(0.8 * kMaxFreeListLength);

  // Also, avoid bringing in too many objects into small object free
  // lists.  There are lots of such lists, and if we allow each one to
  // fetch too many at a time, we end up having to scavenge too often
  // (especially when there are lots of threads and each thread gets a
  // small allowance for its thread cache).
  //
  // TODO: Make thread cache free list sizes dynamic so that we do not
  // have to equally divide a fixed resource amongst lots of threads.
  if (num > 32) num = 32;

  return num;
}
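
// Worked example (illustrative, not part of the original source): for 8-byte
// objects 64KB/8 = 8192 is clamped down to 32; for 4 KB objects 64KB/4KB = 16
// moves per batch; for 32 KB objects the result is held at the minimum of 2.
// The check below is compiled out.
#if 0
static void NumMoveSizeExample() {
    ASSERT(NumMoveSize(8) == 32);
    ASSERT(NumMoveSize(4096) == 16);
    ASSERT(NumMoveSize(32768) == 2);
}
#endif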
// Initialize the mapping arrays
static void InitSizeClasses() {
  // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
  if (ClassIndex(0) < 0) {
    MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
    CRASH();
  }
  if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
    MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
    CRASH();
  }
  // Compute the size classes we want to use
  size_t sc = 1;   // Next size class to assign
  unsigned char alignshift = kAlignShift;
  int last_lg = -1;
  for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
    int lg = LgFloor(size);
    if (lg > last_lg) {
      // Increase alignment every so often.
      //
      // Since we double the alignment every time size doubles and
      // size >= 128, this means that space wasted due to alignment is
      // at most 16/128 i.e., 12.5%.  Plus we cap the alignment at 256
      // bytes, so the space wasted as a percentage starts falling for
      // sizes > 2K.
      if ((lg >= 7) && (alignshift < 8)) {
        alignshift++;
      }
      last_lg = lg;
    }

    // Allocate enough pages so leftover is less than 1/8 of total.
    // This bounds wasted space to at most 12.5%.
    size_t psize = kPageSize;
    while ((psize % size) > (psize >> 3)) {
      psize += kPageSize;
    }
    const size_t my_pages = psize >> kPageShift;

    if (sc > 1 && my_pages == class_to_pages[sc-1]) {
      // See if we can merge this into the previous class without
      // increasing the fragmentation of the previous class.
      const size_t my_objects = (my_pages << kPageShift) / size;
      const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
                                  / class_to_size[sc-1];
      if (my_objects == prev_objects) {
        // Adjust last class to include this size
        class_to_size[sc-1] = size;
        continue;
      }
    }

    // Add new class
    class_to_pages[sc] = my_pages;
    class_to_size[sc] = size;
    sc++;
  }
  if (sc != kNumClasses) {
    MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
            sc, int(kNumClasses));
    CRASH();
  }
  // Initialize the mapping arrays
  int next_size = 0;
  for (unsigned char c = 1; c < kNumClasses; c++) {
    const size_t max_size_in_class = class_to_size[c];
    for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
      class_array[ClassIndex(s)] = c;
    }
    next_size = static_cast<int>(max_size_in_class + kAlignment);
  }
  // Double-check sizes just to be safe
  for (size_t size = 0; size <= kMaxSize; size++) {
    const size_t sc = SizeClass(size);
    if (sc == 0) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      CRASH();
    }
    if (sc > 1 && size <= class_to_size[sc-1]) {
      MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
              "\n", sc, size);
      CRASH();
    }
    if (sc >= kNumClasses) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      CRASH();
    }
    const size_t s = class_to_size[sc];
    if (s == 0) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      CRASH();
    }
    if (s < size) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      CRASH();
    }
  }
  // Initialize the num_objects_to_move array.
  for (size_t cl = 1; cl < kNumClasses; ++cl) {
    num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
  }
#ifndef WTF_CHANGES
  if (false) {
    // Dump class sizes and maximum external wastage per size class
    for (size_t cl = 1; cl < kNumClasses; ++cl) {
      const int alloc_size = class_to_pages[cl] << kPageShift;
      const int alloc_objs = alloc_size / class_to_size[cl];
      const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
      const int max_waste = alloc_size - min_used;
      MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
              int(cl),
              int(class_to_size[cl-1] + 1),
              int(class_to_size[cl]),
              int(class_to_pages[cl] << kPageShift),
              max_waste * 100.0 / alloc_size);
    }
  }
#endif
}
// -------------------------------------------------------------------------
// Simple allocator for objects of a specified type.  External locking
// is required before accessing one of these objects.
// -------------------------------------------------------------------------

// Metadata allocator -- keeps stats about how many bytes allocated
static uint64_t metadata_system_bytes = 0;
static void* MetaDataAlloc(size_t bytes) {
  void* result = TCMalloc_SystemAlloc(bytes, 0);
  if (result != NULL) {
    metadata_system_bytes += bytes;
  }
  return result;
}
template <class T>
class PageHeapAllocator {
 private:
  // How much to allocate from system at a time
  static const size_t kAllocIncrement = 32 << 10;

  // Aligned size of T
  static const size_t kAlignedSize
  = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);

  // Free area from which to carve new objects
  char* free_area_;
  size_t free_avail_;

  // Linked list of all regions allocated by this allocator
  void* allocated_regions_;

  // Free list of already carved objects
  void* free_list_;

  // Number of allocated but unfreed objects
  int inuse_;

 public:
  void Init() {
    ASSERT(kAlignedSize <= kAllocIncrement);
    inuse_ = 0;
    allocated_regions_ = 0;
    free_area_ = NULL;
    free_avail_ = 0;
    free_list_ = NULL;
  }

  T* New() {
    // Consult free list
    void* result;
    if (free_list_ != NULL) {
      result = free_list_;
      free_list_ = *(reinterpret_cast<void**>(result));
    } else {
      if (free_avail_ < kAlignedSize) {
        // Need more room
        char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
        if (!new_allocation)
          CRASH();

        *(void**)new_allocation = allocated_regions_;
        allocated_regions_ = new_allocation;
        free_area_ = new_allocation + kAlignedSize;
        free_avail_ = kAllocIncrement - kAlignedSize;
      }
      result = free_area_;
      free_area_ += kAlignedSize;
      free_avail_ -= kAlignedSize;
    }
    inuse_++;
    return reinterpret_cast<T*>(result);
  }

  void Delete(T* p) {
    *(reinterpret_cast<void**>(p)) = free_list_;
    free_list_ = p;
    inuse_--;
  }

  int inuse() const { return inuse_; }

#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
  template <class Recorder>
  void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
  {
      vm_address_t adminAllocation = reinterpret_cast<vm_address_t>(allocated_regions_);
      while (adminAllocation) {
          recorder.recordRegion(adminAllocation, kAllocIncrement);
          adminAllocation = *reader(reinterpret_cast<vm_address_t*>(adminAllocation));
      }
  }
#endif
};
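
// Usage sketch (illustrative, not part of the original source): instances are
// statically allocated, explicitly Init()'ed, and protected by an external
// lock, as span_allocator below is. ExampleNode is a hypothetical type used
// only for illustration; the block is compiled out.
#if 0
struct ExampleNode { void* a; void* b; };
static PageHeapAllocator<ExampleNode> example_allocator;
static void PageHeapAllocatorExample() {
    example_allocator.Init();
    ExampleNode* n = example_allocator.New();   // carved from a MetaDataAlloc'd region
    example_allocator.Delete(n);                // returned to the internal free list
    ASSERT(example_allocator.inuse() == 0);
}
#endif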
// -------------------------------------------------------------------------
// Span - a contiguous run of pages
// -------------------------------------------------------------------------

// Type that can hold a page number
typedef uintptr_t PageID;

// Type that can hold the length of a run of pages
typedef uintptr_t Length;

static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;

// Convert byte size into pages.  This won't overflow, but may return
// an unreasonably large value if bytes is huge enough.
static inline Length pages(size_t bytes) {
  return (bytes >> kPageShift) +
      ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}

// Convert a user size into the number of bytes that will actually be
// allocated
static size_t AllocationSize(size_t bytes) {
  if (bytes > kMaxSize) {
    // Large object: we allocate an integral number of pages
    ASSERT(bytes <= (kMaxValidPages << kPageShift));
    return pages(bytes) << kPageShift;
  } else {
    // Small object: find the size class to which it belongs
    return ByteSizeForClass(SizeClass(bytes));
  }
}
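
// Worked example (illustrative, not part of the original source): a 100-byte
// request is rounded up to its size class (always >= the request), while a
// 33000-byte request (> kMaxSize) is rounded up to 9 whole pages = 36864
// bytes. The check below is compiled out.
#if 0
static void AllocationSizeExample() {
    ASSERT(AllocationSize(100) >= 100);              // small: rounded to a size class
    ASSERT(AllocationSize(33000) == 9 * kPageSize);  // large: 33000 bytes -> 9 pages
}
#endif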
// Information kept for a span (a contiguous run of pages).
struct Span {
  PageID        start;          // Starting page number
  Length        length;         // Number of pages in span
  Span*         next;           // Used when in linked list
  Span*         prev;           // Used when in linked list
  void*         objects;        // Linked list of free objects
  unsigned int  free : 1;       // Is the span free
#ifndef NO_TCMALLOC_SAMPLES
  unsigned int  sample : 1;     // Sampled object?
#endif
  unsigned int  sizeclass : 8;  // Size-class for small objects (or 0)
  unsigned int  refcount : 11;  // Number of non-free objects
  bool decommitted : 1;

#ifdef SPAN_HISTORY
  // For debugging, we can keep a log of events per span
  int nexthistory;
  char history[64];
  int value[64];
#endif
};

#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)

#ifdef SPAN_HISTORY
void Event(Span* span, char op, int v = 0) {
  span->history[span->nexthistory] = op;
  span->value[span->nexthistory] = v;
  span->nexthistory++;
  if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
}
#else
#define Event(s,o,v) ((void) 0)
#endif
// Allocator/deallocator for spans
static PageHeapAllocator<Span> span_allocator;
static Span* NewSpan(PageID p, Length len) {
  Span* result = span_allocator.New();
  memset(result, 0, sizeof(*result));
  result->start = p;
  result->length = len;
#ifdef SPAN_HISTORY
  result->nexthistory = 0;
#endif
  return result;
}

static inline void DeleteSpan(Span* span) {
#ifndef NDEBUG
  // In debug mode, trash the contents of deleted Spans
  memset(span, 0x3f, sizeof(*span));
#endif
  span_allocator.Delete(span);
}
// -------------------------------------------------------------------------
// Doubly linked list of spans.
// -------------------------------------------------------------------------

static inline void DLL_Init(Span* list) {
  list->next = list;
  list->prev = list;
}

static inline void DLL_Remove(Span* span) {
  span->prev->next = span->next;
  span->next->prev = span->prev;
  span->prev = NULL;
  span->next = NULL;
}

static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
  return list->next == list;
}

static int DLL_Length(const Span* list) {
  int result = 0;
  for (Span* s = list->next; s != list; s = s->next) {
    result++;
  }
  return result;
}

#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
static void DLL_Print(const char* label, const Span* list) {
  MESSAGE("%-10s %p:", label, list);
  for (const Span* s = list->next; s != list; s = s->next) {
    MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
  }
  MESSAGE("\n");
}
#endif

static inline void DLL_Prepend(Span* list, Span* span) {
  ASSERT(span->next == NULL);
  ASSERT(span->prev == NULL);
  span->next = list->next;
  span->prev = list;
  list->next->prev = span;
  list->next = span;
}
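
// Usage sketch (illustrative, not part of the original source): each list is a
// dummy header Span whose next/prev point back at itself when empty; spans are
// always prepended and must have NULL links before insertion. The block below
// is compiled out.
#if 0
static void DLL_Example() {
    Span list;
    DLL_Init(&list);                          // empty: list.next == list.prev == &list
    ASSERT(DLL_IsEmpty(&list));

    Span* s = NewSpan(/* page */ 1, /* length */ 4);
    DLL_Prepend(&list, s);                    // list.next == s
    ASSERT(DLL_Length(&list) == 1);

    DLL_Remove(s);                            // also resets s->next and s->prev to NULL
    ASSERT(DLL_IsEmpty(&list));
    DeleteSpan(s);
}
#endif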
// -------------------------------------------------------------------------
// Stack traces kept for sampled allocations
//   The following state is protected by pageheap_lock_.
// -------------------------------------------------------------------------

// size/depth are made the same size as a pointer so that some generic
// code below can conveniently cast them back and forth to void*.
static const int kMaxStackDepth = 31;
struct StackTrace {
  uintptr_t size;          // Size of object
  uintptr_t depth;         // Number of PC values stored in array below
  void*     stack[kMaxStackDepth];
};
static PageHeapAllocator<StackTrace> stacktrace_allocator;
static Span sampled_objects;
// -------------------------------------------------------------------------
// Map from page-id to per-page data
// -------------------------------------------------------------------------

// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
  typedef PackedCache<BITS, uint64_t> CacheType;
};

#if defined(WTF_CHANGES)
#if PLATFORM(X86_64)
// On all known X86-64 platforms, the upper 16 bits are always unused and therefore
// can be excluded from the PageMap key.
// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
static const size_t kBitsUnusedOn64Bit = 16;
#else
static const size_t kBitsUnusedOn64Bit = 0;
#endif

// A three-level map for 64-bit machines
template <> class MapSelector<64> {
 public:
  typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
  typedef PackedCache<64, uint64_t> CacheType;
};
#endif

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32 - kPageShift> Type;
  typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
};
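
// Worked example (illustrative, not part of the original source): with 4 KB
// pages a 32-bit address space has 2^(32-12) = 2^20 distinct PageIDs, handled
// by the two-level radix map, while x86-64 effectively has 2^(64-12-16) = 2^36
// PageIDs, handled by the three-level map plus the PackedCache of hot
// PageID -> sizeclass entries. The block below is compiled out.
#if 0
static void MapSelectorExample() {
    void* p = 0;
    PageID id = reinterpret_cast<uintptr_t>(p) >> kPageShift;   // address -> PageID
    MapSelector<8 * sizeof(uintptr_t)>::Type* map = 0;          // pagemap type for this machine
    MapSelector<8 * sizeof(uintptr_t)>::CacheType* cache = 0;   // sizeclass cache type
    (void)id; (void)map; (void)cache;
}
#endif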
// -------------------------------------------------------------------------
// Page-level allocator
//  * Eager coalescing
//
// Heap for page-level allocation.  We allow allocating and freeing
// contiguous runs of pages (called a "span").
// -------------------------------------------------------------------------

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
// The central page heap collects spans of memory that have been deleted but are still committed until they are released
// back to the system.  We use a background thread to periodically scan the list of free spans and release some back to the
// system.  Every 5 seconds, the background thread wakes up and does the following:
// - Check if we needed to commit memory in the last 5 seconds.  If so, skip this scavenge because it's a sign that we are short
// of free committed pages and so we should not release them back to the system yet.
// - Otherwise, go through the list of free spans (from largest to smallest) and release up to a fraction of the free committed pages
// back to the system.
// - If the number of free committed pages reaches kMinimumFreeCommittedPageCount, we can stop the scavenging and block the
// scavenging thread until the number of free committed pages goes above kMinimumFreeCommittedPageCount.

// Background thread wakes up every 5 seconds to scavenge as long as there is memory available to return to the system.
static const int kScavengeTimerDelayInSeconds = 5;

// Number of free committed pages that we want to keep around.
static const size_t kMinimumFreeCommittedPageCount = 512;

// During a scavenge, we'll release up to a fraction of the free committed pages.
#if PLATFORM(WIN)
// We are slightly less aggressive in releasing memory on Windows due to performance reasons.
static const int kMaxScavengeAmountFactor = 3;
#else
static const int kMaxScavengeAmountFactor = 2;
#endif
#endif
class TCMalloc_PageHeap {
 public:
  void init();

  // Allocate a run of "n" pages.  Returns zero if out of memory.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]".
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: !span->free
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);

  // Return the descriptor for the specified page.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

  inline Span* GetDescriptorEnsureSafe(PageID p)
  {
      pagemap_.Ensure(p, 1);
      return GetDescriptor(p);
  }

  size_t ReturnedBytes() const;

  // Dump state to stderr
  void Dump(TCMalloc_Printer* out);

  // Return number of bytes allocated from system
  inline uint64_t SystemBytes() const { return system_bytes_; }

  // Return number of free bytes in heap
  uint64_t FreeBytes() const {
    return (static_cast<uint64_t>(free_pages_) << kPageShift);
  }

  bool Check();
  bool CheckList(Span* list, Length min_pages, Length max_pages);

  // Release all pages on the free list for reuse by the OS:
  void ReleaseFreePages();

  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }

 private:
  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
  typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
  PageMap pagemap_;
  mutable PageMapCache pagemap_cache_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span        normal;
    Span        returned;
  };

  // List of free spans of length >= kMaxPages
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans
  SpanList free_[kMaxPages];

  // Number of pages kept in free lists
  uintptr_t free_pages_;

  // Bytes allocated from system
  uint64_t system_bytes_;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  // Number of pages kept in free lists that are still committed.
  Length free_committed_pages_;

  // Number of pages that we committed in the last scavenge wait interval.
  Length pages_committed_since_last_scavenge_;
#endif

  bool GrowHeap(Length n);

  // REQUIRES   span->length >= n
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists.  Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.
  //
  // "released" is true iff "span" was found on a "returned" list.
  void Carve(Span* span, Length n, bool released);

  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }

  // Allocate a large span of length == n.  If successful, returns a
  // span of exactly the specified length.  Else, returns NULL.
  Span* AllocLarge(Length n);

#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);
#endif

  // Number of pages to deallocate before doing more scavenging
  int64_t scavenge_counter_;

  // Index of last free list we scavenged
  size_t scavenge_index_;

#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
  friend class FastMallocZone;
#endif

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  static NO_RETURN void* runScavengerThread(void*);

  NO_RETURN void scavengerThread();

  void scavenge();

  inline bool shouldContinueScavenging() const;

  pthread_mutex_t m_scavengeMutex;

  pthread_cond_t m_scavengeCondition;

  // Keeps track of whether the background thread is actively scavenging memory every kScavengeTimerDelayInSeconds, or
  // it's blocked waiting for more pages to be deleted.
  bool m_scavengeThreadActive;
#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
};
void TCMalloc_PageHeap::init()
{
  pagemap_.init(MetaDataAlloc);
  pagemap_cache_ = PageMapCache(0);
  free_pages_ = 0;
  system_bytes_ = 0;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  free_committed_pages_ = 0;
  pages_committed_since_last_scavenge_ = 0;
#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY

  scavenge_counter_ = 0;
  // Start scavenging at kMaxPages list
  scavenge_index_ = kMaxPages-1;
  COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
  DLL_Init(&large_.normal);
  DLL_Init(&large_.returned);
  for (size_t i = 0; i < kMaxPages; i++) {
    DLL_Init(&free_[i].normal);
    DLL_Init(&free_[i].returned);
  }

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  pthread_mutex_init(&m_scavengeMutex, 0);
  pthread_cond_init(&m_scavengeCondition, 0);
  m_scavengeThreadActive = true;
  pthread_t thread;
  pthread_create(&thread, 0, runScavengerThread, this);
#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
}
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
void* TCMalloc_PageHeap::runScavengerThread(void* context)
{
  static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
#if COMPILER(MSVC)
  // Without this, Visual Studio will complain that this method does not return a value.
  return 0;
#endif
}
void TCMalloc_PageHeap::scavenge()
{
    // If we had to commit memory in the last 5 seconds, it means we don't have enough free committed pages
    // for the amount of allocations that we do. So hold off on releasing memory back to the system.
    if (pages_committed_since_last_scavenge_ > 0) {
        pages_committed_since_last_scavenge_ = 0;
        return;
    }

    Length pagesDecommitted = 0;
    for (int i = kMaxPages; i >= 0; i--) {
        SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
        if (!DLL_IsEmpty(&slist->normal)) {
            // Release the last span on the normal portion of this list
            Span* s = slist->normal.prev;
            // Only decommit up to a fraction of the free committed pages if pages_allocated_since_last_scavenge_ > 0.
            if ((pagesDecommitted + s->length) * kMaxScavengeAmountFactor > free_committed_pages_)
                continue;
            DLL_Remove(s);
            TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                                   static_cast<size_t>(s->length << kPageShift));
            if (!s->decommitted) {
                pagesDecommitted += s->length;
                s->decommitted = true;
            }
            DLL_Prepend(&slist->returned, s);
            // We can stop scavenging if the number of free committed pages left is less than or equal to the minimum number we want to keep around.
            if (free_committed_pages_ <= kMinimumFreeCommittedPageCount + pagesDecommitted)
                break;
        }
    }
    pages_committed_since_last_scavenge_ = 0;
    ASSERT(free_committed_pages_ >= pagesDecommitted);
    free_committed_pages_ -= pagesDecommitted;
}

inline bool TCMalloc_PageHeap::shouldContinueScavenging() const
{
    return free_committed_pages_ > kMinimumFreeCommittedPageCount;
}

#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
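
// Illustrative sketch (not the actual implementation, which appears later in
// this file): a scavenger loop built from the pieces above would block on
// m_scavengeCondition while shouldContinueScavenging() is false, otherwise
// sleep kScavengeTimerDelayInSeconds and call scavenge() with the page heap
// lock held. Compiled out; the locking shown here is an assumption.
#if 0
void TCMalloc_PageHeap::scavengerThread()
{
    while (1) {
        if (!shouldContinueScavenging()) {
            pthread_mutex_lock(&m_scavengeMutex);
            m_scavengeThreadActive = false;
            // Block until a Delete() signals that enough free committed pages accumulated.
            pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
            m_scavengeThreadActive = true;
            pthread_mutex_unlock(&m_scavengeMutex);
        }
        sleep(kScavengeTimerDelayInSeconds);
        {
            SpinLockHolder h(&pageheap_lock);
            scavenge();
        }
    }
}
#endif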
inline Span* TCMalloc_PageHeap::New(Length n) {
  ASSERT(Check());
  ASSERT(n > 0);

  // Find first size >= n that has a non-empty list
  for (Length s = n; s < kMaxPages; s++) {
    Span* ll = NULL;
    bool released = false;
    if (!DLL_IsEmpty(&free_[s].normal)) {
      // Found normal span
      ll = &free_[s].normal;
    } else if (!DLL_IsEmpty(&free_[s].returned)) {
      // Found returned span; reallocate it
      ll = &free_[s].returned;
      released = true;
    } else {
      // Keep looking in larger classes
      continue;
    }

    Span* result = ll->next;
    Carve(result, n, released);
    if (result->decommitted) {
        TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift));
        result->decommitted = false;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
        pages_committed_since_last_scavenge_ += n;
#endif
    }
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    else {
        // The newly allocated memory is from a span that's in the normal span list (already committed).  Update the
        // free committed pages count.
        ASSERT(free_committed_pages_ >= n);
        free_committed_pages_ -= n;
    }
#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    ASSERT(Check());
    free_pages_ -= n;
    return result;
  }

  Span* result = AllocLarge(n);
  if (result != NULL) {
      ASSERT_SPAN_COMMITTED(result);
      return result;
  }

  // Grow the heap and try again
  if (!GrowHeap(n)) {
    ASSERT(Check());
    return NULL;
  }

  return AllocLarge(n);
}
Span* TCMalloc_PageHeap::AllocLarge(Length n) {
  // Find the best span (closest to n in size).
  // The following loops implement address-ordered best-fit.
  bool from_released = false;
  Span* best = NULL;

  // Search through normal list
  for (Span* span = large_.normal.next;
       span != &large_.normal;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        from_released = false;
      }
    }
  }

  // Search through released list in case it has a better fit
  for (Span* span = large_.returned.next;
       span != &large_.returned;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        from_released = true;
      }
    }
  }

  if (best != NULL) {
    Carve(best, n, from_released);
    if (best->decommitted) {
        TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift));
        best->decommitted = false;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
        pages_committed_since_last_scavenge_ += n;
#endif
    }
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    else {
        // The newly allocated memory is from a span that's in the normal span list (already committed).  Update the
        // free committed pages count.
        ASSERT(free_committed_pages_ >= n);
        free_committed_pages_ -= n;
    }
#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    ASSERT(Check());
    free_pages_ -= n;
    return best;
  }
  return NULL;
}
Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
  ASSERT(0 < n);
  ASSERT(n < span->length);
  ASSERT(!span->free);
  ASSERT(span->sizeclass == 0);
  Event(span, 'T', n);

  const Length extra = span->length - n;
  Span* leftover = NewSpan(span->start + n, extra);
  Event(leftover, 'U', extra);
  RecordSpan(leftover);
  pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
  span->length = n;

  return leftover;
}
static ALWAYS_INLINE void propagateDecommittedState(Span* destination, Span* source)
{
    destination->decommitted = source->decommitted;
}
inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
  ASSERT(n > 0);
  DLL_Remove(span);
  span->free = 0;
  Event(span, 'A', n);

  const int extra = static_cast<int>(span->length - n);
  ASSERT(extra >= 0);
  if (extra > 0) {
    Span* leftover = NewSpan(span->start + n, extra);
    leftover->free = 1;
    propagateDecommittedState(leftover, span);
    Event(leftover, 'S', extra);
    RecordSpan(leftover);

    // Place leftover span on appropriate free list
    SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
    Span* dst = released ? &listpair->returned : &listpair->normal;
    DLL_Prepend(dst, leftover);

    span->length = n;
    pagemap_.set(span->start + n - 1, span);
  }
}
static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
{
    if (destination->decommitted && !other->decommitted) {
        TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
                               static_cast<size_t>(other->length << kPageShift));
    } else if (other->decommitted && !destination->decommitted) {
        TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift),
                               static_cast<size_t>(destination->length << kPageShift));
        destination->decommitted = true;
    }
}
inline void TCMalloc_PageHeap::Delete(Span* span) {
  ASSERT(Check());
  ASSERT(!span->free);
  ASSERT(span->length > 0);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start + span->length - 1) == span);
  span->sizeclass = 0;
#ifndef NO_TCMALLOC_SAMPLES
  span->sample = 0;
#endif

  // Coalesce -- we guarantee that "p" != 0, so no bounds checking
  // necessary.  We do not bother resetting the stale pagemap
  // entries for the pieces we are merging together because we only
  // care about the pagemap entries for the boundaries.
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  // Track the total size of the neighboring free spans that are committed.
  Length neighboringCommittedSpansLength = 0;
#endif
  const PageID p = span->start;
  const Length n = span->length;
  Span* prev = GetDescriptor(p-1);
  if (prev != NULL && prev->free) {
    // Merge preceding span into this span
    ASSERT(prev->start + prev->length == p);
    const Length len = prev->length;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    if (!prev->decommitted)
        neighboringCommittedSpansLength += len;
#endif
    mergeDecommittedStates(span, prev);
    DLL_Remove(prev);
    DeleteSpan(prev);
    span->start -= len;
    span->length += len;
    pagemap_.set(span->start, span);
    Event(span, 'L', len);
  }
  Span* next = GetDescriptor(p+n);
  if (next != NULL && next->free) {
    // Merge next span into this span
    ASSERT(next->start == p+n);
    const Length len = next->length;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    if (!next->decommitted)
        neighboringCommittedSpansLength += len;
#endif
    mergeDecommittedStates(span, next);
    DLL_Remove(next);
    DeleteSpan(next);
    span->length += len;
    pagemap_.set(span->start + span->length - 1, span);
    Event(span, 'R', len);
  }

  Event(span, 'D', span->length);
  span->free = 1;
  if (span->decommitted) {
    if (span->length < kMaxPages)
      DLL_Prepend(&free_[span->length].returned, span);
    else
      DLL_Prepend(&large_.returned, span);
  } else {
    if (span->length < kMaxPages)
      DLL_Prepend(&free_[span->length].normal, span);
    else
      DLL_Prepend(&large_.normal, span);
  }
  free_pages_ += n;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  if (span->decommitted) {
      // If the merged span is decommitted, that means we decommitted any neighboring spans that were
      // committed.  Update the free committed pages count.
      free_committed_pages_ -= neighboringCommittedSpansLength;
  } else {
      // If the merged span remains committed, add the deleted span's size to the free committed pages count.
      free_committed_pages_ += n;
  }

  // Make sure the scavenge thread becomes active if we have enough freed pages to release some back to the system.
  if (!m_scavengeThreadActive && shouldContinueScavenging())
      pthread_cond_signal(&m_scavengeCondition);
#else
  IncrementalScavenge(n);
#endif

  ASSERT(Check());
}
#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
  // Fast path; not yet time to release memory
  scavenge_counter_ -= n;
  if (scavenge_counter_ >= 0) return;  // Not yet time to scavenge

  static const size_t kDefaultReleaseDelay = 64;

  // Find index of free list to scavenge
  size_t index = scavenge_index_ + 1;
  for (size_t i = 0; i < kMaxPages+1; i++) {
    if (index > kMaxPages) index = 0;
    SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
    if (!DLL_IsEmpty(&slist->normal)) {
      // Release the last span on the normal portion of this list
      Span* s = slist->normal.prev;
      DLL_Remove(s);
      TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                             static_cast<size_t>(s->length << kPageShift));
      s->decommitted = true;
      DLL_Prepend(&slist->returned, s);

      scavenge_counter_ = std::max<size_t>(16UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));

      if (index == kMaxPages && !DLL_IsEmpty(&slist->normal))
        scavenge_index_ = index - 1;
      else
        scavenge_index_ = index;
      return;
    }
    index++;
  }

  // Nothing to scavenge, delay for a while
  scavenge_counter_ = kDefaultReleaseDelay;
}
#endif
void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
  // Associate span object with all interior pages as well
  ASSERT(!span->free);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start+span->length-1) == span);
  Event(span, 'C', sc);
  span->sizeclass = static_cast<unsigned int>(sc);
  for (Length i = 1; i < span->length-1; i++) {
    pagemap_.set(span->start+i, span);
  }
}
size_t TCMalloc_PageHeap::ReturnedBytes() const {
    size_t result = 0;
    for (unsigned s = 0; s < kMaxPages; s++) {
        const int r_length = DLL_Length(&free_[s].returned);
        unsigned r_pages = s * r_length;
        result += r_pages << kPageShift;
    }

    for (Span* s = large_.returned.next; s != &large_.returned; s = s->next)
        result += s->length << kPageShift;

    return result;
}
static double PagesToMB(uint64_t pages) {
  return (pages << kPageShift) / 1048576.0;
}
void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
  int nonempty_sizes = 0;
  for (int s = 0; s < kMaxPages; s++) {
    if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
      nonempty_sizes++;
    }
  }
  out->printf("------------------------------------------------\n");
  out->printf("PageHeap: %d sizes; %6.1f MB free\n",
              nonempty_sizes, PagesToMB(free_pages_));
  out->printf("------------------------------------------------\n");
  uint64_t total_normal = 0;
  uint64_t total_returned = 0;
  for (int s = 0; s < kMaxPages; s++) {
    const int n_length = DLL_Length(&free_[s].normal);
    const int r_length = DLL_Length(&free_[s].returned);
    if (n_length + r_length > 0) {
      uint64_t n_pages = s * n_length;
      uint64_t r_pages = s * r_length;
      total_normal += n_pages;
      total_returned += r_pages;
      out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
                  "; unmapped: %6.1f MB; %6.1f MB cum\n",
                  s,
                  (n_length + r_length),
                  PagesToMB(n_pages + r_pages),
                  PagesToMB(total_normal + total_returned),
                  PagesToMB(r_pages),
                  PagesToMB(total_returned));
    }
  }

  uint64_t n_pages = 0;
  uint64_t r_pages = 0;
  int n_spans = 0;
  int r_spans = 0;
  out->printf("Normal large spans:\n");
  for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
    out->printf("   [ %6" PRIuS " pages ] %6.1f MB\n",
                s->length, PagesToMB(s->length));
    n_pages += s->length;
    n_spans++;
  }
  out->printf("Unmapped large spans:\n");
  for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
    out->printf("   [ %6" PRIuS " pages ] %6.1f MB\n",
                s->length, PagesToMB(s->length));
    r_pages += s->length;
    r_spans++;
  }
  total_normal += n_pages;
  total_returned += r_pages;
  out->printf(">255   large * %6u spans ~ %6.1f MB; %6.1f MB cum"
              "; unmapped: %6.1f MB; %6.1f MB cum\n",
              (n_spans + r_spans),
              PagesToMB(n_pages + r_pages),
              PagesToMB(total_normal + total_returned),
              PagesToMB(r_pages),
              PagesToMB(total_returned));
}
bool TCMalloc_PageHeap::GrowHeap(Length n) {
  ASSERT(kMaxPages >= kMinSystemAlloc);
  if (n > kMaxValidPages) return false;
  Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
  size_t actual_size;
  void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
  if (ptr == NULL) {
    if (n < ask) {
      // Try growing just "n" pages
      ask = n;
      ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
    }
    if (ptr == NULL) return false;
  }
  ask = actual_size >> kPageShift;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  pages_committed_since_last_scavenge_ += ask;
#endif

  uint64_t old_system_bytes = system_bytes_;
  system_bytes_ += (ask << kPageShift);
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  ASSERT(p > 0);

  // If we already have a lot of pages allocated, just preallocate a bunch of
  // memory for the page map. This prevents fragmentation by pagemap metadata
  // when a program keeps allocating and freeing large blocks.

  if (old_system_bytes < kPageMapBigAllocationThreshold
      && system_bytes_ >= kPageMapBigAllocationThreshold) {
    pagemap_.PreallocateMoreMemory();
  }

  // Make sure pagemap_ has entries for all of the new pages.
  // Plus ensure one before and one after so coalescing code
  // does not need bounds-checking.
  if (pagemap_.Ensure(p-1, ask+2)) {
    // Pretend the new area is allocated and then Delete() it to
    // cause any necessary coalescing to occur.
    //
    // We do not adjust free_pages_ here since Delete() will do it for us.
    Span* span = NewSpan(p, ask);
    RecordSpan(span);
    Delete(span);
    ASSERT(Check());
    return true;
  } else {
    // We could not allocate memory within "pagemap_"
    // TODO: Once we can return memory to the system, return the new span
    return false;
  }
}
bool TCMalloc_PageHeap::Check() {
  ASSERT(free_[0].normal.next == &free_[0].normal);
  ASSERT(free_[0].returned.next == &free_[0].returned);
  CheckList(&large_.normal, kMaxPages, 1000000000);
  CheckList(&large_.returned, kMaxPages, 1000000000);
  for (Length s = 1; s < kMaxPages; s++) {
    CheckList(&free_[s].normal, s, s);
    CheckList(&free_[s].returned, s, s);

bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) {

bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
  for (Span* s = list->next; s != list; s = s->next) {
    CHECK_CONDITION(s->free);
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(s->length <= max_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
static void ReleaseFreeList(Span* list, Span* returned) {
  // Walk backwards through list so that when we push these
  // spans on the "returned" list, we preserve the order.
  while (!DLL_IsEmpty(list)) {
    Span* s = list->prev;
    DLL_Prepend(returned, s);
    TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                           static_cast<size_t>(s->length << kPageShift));

void TCMalloc_PageHeap::ReleaseFreePages() {
  for (Length s = 0; s < kMaxPages; s++) {
    ReleaseFreeList(&free_[s].normal, &free_[s].returned);
  }
  ReleaseFreeList(&large_.normal, &large_.returned);
//-------------------------------------------------------------------
//-------------------------------------------------------------------

class TCMalloc_ThreadCache_FreeList {
  void*    list_;       // Linked list of nodes
  uint16_t length_;     // Current length
  uint16_t lowater_;    // Low water mark for list length

  // Return current length of list
  int length() const {

  bool empty() const {
    return list_ == NULL;

  // Low-water mark management
  int lowwatermark() const { return lowater_; }
  void clear_lowwatermark() { lowater_ = length_; }

  ALWAYS_INLINE void Push(void* ptr) {
    SLL_Push(&list_, ptr);

  void PushRange(int N, void *start, void *end) {
    SLL_PushRange(&list_, start, end);
    length_ = length_ + static_cast<uint16_t>(N);

  void PopRange(int N, void **start, void **end) {
    SLL_PopRange(&list_, N, start, end);
    ASSERT(length_ >= N);
    length_ = length_ - static_cast<uint16_t>(N);
    if (length_ < lowater_) lowater_ = length_;

  ALWAYS_INLINE void* Pop() {
    ASSERT(list_ != NULL);
    if (length_ < lowater_) lowater_ = length_;
    return SLL_Pop(&list_);

  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
    for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
      finder.visit(nextObject);
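// [Illustrative sketch - not part of the original source.] The free list above
// is intrusive: the first word of every free object holds the pointer to the
// next free object, which is why Pop() and enumerateFreeObjects() reinterpret
// each entry as a void**. A standalone model of that push/pop idiom, with
// hypothetical names:
#if 0 // disabled example
#include <cassert>

// Push an object onto an intrusive singly-linked list; its first word becomes the link.
static void sllPush(void** list, void* object)
{
    *reinterpret_cast<void**>(object) = *list;
    *list = object;
}

static void* sllPop(void** list)
{
    void* result = *list;
    assert(result);
    *list = *reinterpret_cast<void**>(result);
    return result;
}
#endif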
//-------------------------------------------------------------------
// Data kept per thread
//-------------------------------------------------------------------

class TCMalloc_ThreadCache {
  typedef TCMalloc_ThreadCache_FreeList FreeList;
  typedef DWORD ThreadIdentifier;
  typedef pthread_t ThreadIdentifier;

  size_t           size_;                 // Combined size of data
  ThreadIdentifier tid_;                  // Which thread owns it
  bool             in_setspecific_;       // Called pthread_setspecific?
  FreeList         list_[kNumClasses];    // Array indexed by size-class

  // We sample allocations, biased by the size of the allocation
  uint32_t         rnd_;                  // Cheap random number generator
  size_t           bytes_until_sample_;   // Bytes until we sample next

  // Allocate a new heap. REQUIRES: pageheap_lock is held.
  static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);

  // Use only as pthread thread-specific destructor function.
  static void DestroyThreadCache(void* ptr);

  // All ThreadCache objects are kept in a linked list (for stats collection)
  TCMalloc_ThreadCache* next_;
  TCMalloc_ThreadCache* prev_;

  void Init(ThreadIdentifier tid);

  // Accessors (mostly just for printing stats)
  int freelist_length(size_t cl) const { return list_[cl].length(); }

  // Total byte size in cache
  size_t Size() const { return size_; }

  void* Allocate(size_t size);
  void Deallocate(void* ptr, size_t size_class);

  void FetchFromCentralCache(size_t cl, size_t allocationSize);
  void ReleaseToCentralCache(size_t cl, int N);

  // Record allocation of "k" bytes.  Return true iff allocation
  // should be sampled
  bool SampleAllocation(size_t k);

  // Pick next sampling point
  void PickNextSample(size_t k);

  static void InitModule();
  static void InitTSD();
  static TCMalloc_ThreadCache* GetThreadHeap();
  static TCMalloc_ThreadCache* GetCache();
  static TCMalloc_ThreadCache* GetCacheIfPresent();
  static TCMalloc_ThreadCache* CreateCacheIfNecessary();
  static void DeleteCache(TCMalloc_ThreadCache* heap);
  static void BecomeIdle();
  static void RecomputeThreadCacheSize();

  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
    for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
      list_[sizeClass].enumerateFreeObjects(finder, reader);
//-------------------------------------------------------------------
// Data kept per size-class in central cache
//-------------------------------------------------------------------

class TCMalloc_Central_FreeList {
  void Init(size_t cl);

  // These methods all do internal locking.

  // Insert the specified range into the central freelist.  N is the number of
  // elements in the range.
  void InsertRange(void *start, void *end, int N);

  // Returns the actual number of fetched elements into N.
  void RemoveRange(void **start, void **end, int *N);

  // Returns the number of free objects in cache.
    SpinLockHolder h(&lock_);

  // Returns the number of free objects in the transfer cache.
    SpinLockHolder h(&lock_);
    return used_slots_ * num_objects_to_move[size_class_];

  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
    for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0))
      ASSERT(!span->objects);

    ASSERT(!nonempty_.objects);
    static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);

    Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
    Span* remoteSpan = nonempty_.next;

    for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) {
      for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
        finder.visit(nextObject);

  // REQUIRES: lock_ is held
  // Remove object from cache and return.
  // Return NULL if no free entries in cache.
  void* FetchFromSpans();

  // REQUIRES: lock_ is held
  // Remove object from cache and return.  Fetches
  // from pageheap if cache is empty.  Only returns
  // NULL on allocation failure.
  void* FetchFromSpansSafe();

  // REQUIRES: lock_ is held
  // Release a linked list of objects to spans.
  // May temporarily release lock_.
  void ReleaseListToSpans(void *start);

  // REQUIRES: lock_ is held
  // Release an object to spans.
  // May temporarily release lock_.
  void ReleaseToSpans(void* object);

  // REQUIRES: lock_ is held
  // Populate cache by fetching from the page heap.
  // May temporarily release lock_.

  // REQUIRES: lock is held.
  // Tries to make room for a TCEntry.  If the cache is full it will try to
  // expand it at the cost of some other cache size.  Return false if there is
  bool MakeCacheSpace();

  // REQUIRES: lock_ for locked_size_class is held.
  // Picks a "random" size class to steal TCEntry slot from.  In reality it
  // just iterates over the sizeclasses but does so without taking a lock.
  // Returns true on success.
  // May temporarily lock a "random" size class.
  static bool EvictRandomSizeClass(size_t locked_size_class, bool force);

  // REQUIRES: lock_ is *not* held.
  // Tries to shrink the Cache.  If force is true it will release objects to
  // spans if it allows it to shrink the cache.  Return false if it failed to
  // shrink the cache.  Decrements cache_size_ on success.
  // May temporarily take lock_.  If it takes lock_, the locked_size_class
  // lock is released to keep the thread from holding two size class locks
  // concurrently which could lead to a deadlock.
  bool ShrinkCache(int locked_size_class, bool force);

  // This lock protects all the data members.  cached_entries and cache_size_
  // may be looked at without holding the lock.

  // We keep linked lists of empty and non-empty spans.
  size_t   size_class_;     // My size class
  Span     empty_;          // Dummy header for list of empty spans
  Span     nonempty_;       // Dummy header for list of non-empty spans
  size_t   counter_;        // Number of free objects in cache entry

  // Here we reserve space for TCEntry cache slots.  Since one size class can
  // end up getting all the TCEntries quota in the system we just preallocate
  // sufficient number of entries here.
  TCEntry tc_slots_[kNumTransferEntries];

  // Number of currently used cached entries in tc_slots_.  This variable is
  // updated under a lock but can be read without one.
  int32_t used_slots_;
  // The current number of slots for this size class.  This is an
  // adaptive value that is increased if there is lots of traffic
  // on a given size class.
  int32_t cache_size_;

// Pad each CentralCache object to multiple of 64 bytes
class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
  char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
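// [Illustrative sketch - not part of the original source.] The pad_ member
// above rounds each central free list up to a multiple of 64 bytes so that
// adjacent entries of central_cache[] do not share a cache line (avoiding
// false sharing between their spinlocks). The same padding idiom applied to a
// hypothetical struct:
#if 0 // disabled example
struct Counter {
    long value;
    char pad[(64 - (sizeof(long) % 64)) % 64];  // round size up to a 64-byte multiple
};
// Adjacent Counter objects in an array now occupy distinct 64-byte blocks
// (assuming the array itself is 64-byte aligned).
#endif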
//-------------------------------------------------------------------
//-------------------------------------------------------------------

// Central cache -- a collection of free-lists, one per size-class.
// We have a separate lock per free-list to reduce contention.
static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];

// Page-level allocator
static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;

static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)] __attribute__((aligned));
static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];

static bool phinited = false;

// Avoid extra level of indirection by making "pageheap" be just an alias
// of pageheap_memory.
  TCMalloc_PageHeap* m_pageHeap;

static inline TCMalloc_PageHeap* getPageHeap()
  PageHeapUnion u = { &pageheap_memory[0] };
  return u.m_pageHeap;

#define pageheap getPageHeap()
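// [Illustrative sketch - not part of the original source.] getPageHeap() above
// reinterprets the raw static buffer pageheap_memory as a TCMalloc_PageHeap*
// through a union, so "pageheap" needs no separate pointer variable. The same
// idiom with hypothetical names (the real object is constructed into the
// buffer elsewhere with placement new):
#if 0 // disabled example
#include <new>

struct Widget { int value; };

static void* widgetStorage[(sizeof(Widget) + sizeof(void*) - 1) / sizeof(void*)];

union WidgetUnion {
    void* storage;
    Widget* widget;
};

static inline Widget* getWidget()
{
    WidgetUnion u = { &widgetStorage[0] };
    return u.widget;
}

// Usage: construct once into the static buffer, then alias it everywhere.
//   new (widgetStorage) Widget();
//   getWidget()->value = 42;
#endif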
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY

static void sleep(unsigned seconds)
    ::Sleep(seconds * 1000);

void TCMalloc_PageHeap::scavengerThread()
#if HAVE(PTHREAD_SETNAME_NP)
  pthread_setname_np("JavaScriptCore: FastMalloc scavenger");

    if (!shouldContinueScavenging()) {
      pthread_mutex_lock(&m_scavengeMutex);
      m_scavengeThreadActive = false;
      // Block until there are enough freed pages to release back to the system.
      pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
      m_scavengeThreadActive = true;
      pthread_mutex_unlock(&m_scavengeMutex);

    sleep(kScavengeTimerDelayInSeconds);
      SpinLockHolder h(&pageheap_lock);
      pageheap->scavenge();
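// [Illustrative sketch - not part of the original source.] The scavenger loop
// above parks on a condition variable while there is nothing to release and
// otherwise wakes periodically to do its work under the page-heap lock. A
// stripped-down model of that loop shape, with hypothetical names:
#if 0 // disabled example
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t workMutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t workCondition = PTHREAD_COND_INITIALIZER;
static bool haveWork = false;   // set to true by producers under workMutex

static void* backgroundWorker(void*)
{
    while (true) {
        pthread_mutex_lock(&workMutex);
        while (!haveWork)
            pthread_cond_wait(&workCondition, &workMutex);  // block until signaled
        pthread_mutex_unlock(&workMutex);

        sleep(5);   // let work accumulate, like kScavengeTimerDelayInSeconds
        // ... periodic work goes here (the real thread calls pageheap->scavenge()) ...
    }
    return 0;
}
#endif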
// If TLS is available, we also store a copy
// of the per-thread object in a __thread variable
// since __thread variables are faster to read
// than pthread_getspecific().  We still need
// pthread_setspecific() because __thread
// variables provide no way to run cleanup
// code when a thread is destroyed.
static __thread TCMalloc_ThreadCache *threadlocal_heap;

// Thread-specific key.  Initialization here is somewhat tricky
// because some Linux startup code invokes malloc() before it
// is in a good enough state to handle pthread_key_create().
// Therefore, we use TSD keys only after tsd_inited is set to true.
// Until then, we use a slow path to get the heap object.
static bool tsd_inited = false;
static pthread_key_t heap_key;
DWORD tlsIndex = TLS_OUT_OF_INDEXES;

static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
    // still do pthread_setspecific when using MSVC fast TLS to
    // benefit from the delete callback.
    pthread_setspecific(heap_key, heap);
    TlsSetValue(tlsIndex, heap);

// Allocator for thread heaps
static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;

// Linked list of heap objects.  Protected by pageheap_lock.
static TCMalloc_ThreadCache* thread_heaps = NULL;
static int thread_heap_count = 0;

// Overall thread cache size.  Protected by pageheap_lock.
static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;

// Global per-thread cache size.  Writes are protected by
// pageheap_lock.  Reads are done without any locking, which should be
// fine as long as size_t can be written atomically and we don't place
// invariants between this variable and other pieces of state.
static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
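// [Illustrative sketch - not part of the original source.] The comment above
// explains why a __thread variable and a pthread key are kept side by side:
// the __thread copy makes reads cheap, while pthread_setspecific() is still
// needed so the per-thread destructor runs on thread exit. A minimal model of
// that pairing, with hypothetical names:
#if 0 // disabled example
#include <pthread.h>
#include <stdlib.h>

struct PerThread { int counter; };

static pthread_key_t perThreadKey;              // provides the destructor callback
static __thread PerThread* perThreadFast = 0;   // fast-path copy of the same pointer

static void destroyPerThread(void* p) { free(p); }

// Call once at startup.
static void initPerThreadKey() { pthread_key_create(&perThreadKey, destroyPerThread); }

static PerThread* getPerThread()
{
    if (perThreadFast)                          // cheap __thread read
        return perThreadFast;
    PerThread* p = static_cast<PerThread*>(calloc(1, sizeof(PerThread)));
    pthread_setspecific(perThreadKey, p);       // registers destroyPerThread for this thread
    perThreadFast = p;
    return p;
}
#endif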
//-------------------------------------------------------------------
// Central cache implementation
//-------------------------------------------------------------------

void TCMalloc_Central_FreeList::Init(size_t cl) {
  DLL_Init(&nonempty_);
  ASSERT(cache_size_ <= kNumTransferEntries);

void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
    void *next = SLL_Next(start);
    ReleaseToSpans(start);

ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);
  ASSERT(span != NULL);
  ASSERT(span->refcount > 0);

  // If span is empty, move it to non-empty list
  if (span->objects == NULL) {
    DLL_Prepend(&nonempty_, span);
    Event(span, 'N', 0);

  // The following check is expensive, so it is disabled by default
  // Check that object does not occur in list
    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
      ASSERT(p != object);
    ASSERT(got + span->refcount ==
           (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));

  if (span->refcount == 0) {
    Event(span, '#', 0);
    counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);

    // Release central list lock while operating on pageheap
      SpinLockHolder h(&pageheap_lock);
      pageheap->Delete(span);

    *(reinterpret_cast<void**>(object)) = span->objects;
    span->objects = object;
ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
    size_t locked_size_class, bool force) {
  static int race_counter = 0;
  int t = race_counter++;  // Updated without a lock, but who cares.
  if (t >= static_cast<int>(kNumClasses)) {
    while (t >= static_cast<int>(kNumClasses)) {
  ASSERT(t < static_cast<int>(kNumClasses));
  if (t == static_cast<int>(locked_size_class)) return false;
  return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);

bool TCMalloc_Central_FreeList::MakeCacheSpace() {
  // Is there room in the cache?
  if (used_slots_ < cache_size_) return true;
  // Check if we can expand this cache?
  if (cache_size_ == kNumTransferEntries) return false;
  // Ok, we'll try to grab an entry from some other size class.
  if (EvictRandomSizeClass(size_class_, false) ||
      EvictRandomSizeClass(size_class_, true)) {
    // Succeeded in evicting, we're going to make our cache larger.
class LockInverter {
  SpinLock *held_, *temp_;
  inline explicit LockInverter(SpinLock* held, SpinLock *temp)
    : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
  inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
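// [Illustrative sketch - not part of the original source.] LockInverter is a
// RAII guard that temporarily swaps which spinlock the current thread holds;
// ShrinkCache() below relies on it so a thread never holds two size-class
// locks at once (there is no global ordering between them). A sketch of how
// such a guard is used, with hypothetical names:
#if 0 // disabled example
// Caller holds lockA and needs lockB; holding both could deadlock against a
// thread that acquires them in the opposite order, so drop lockA for the
// duration of the inner work.
void transferWork(SpinLock* lockA, SpinLock* lockB)
{
    LockInverter inverter(lockA, lockB);   // unlocks lockA, then locks lockB
    // ... touch only state protected by lockB here ...
}   // destructor unlocks lockB, then re-acquires lockA
#endif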
bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
  // Start with a quick check without taking a lock.
  if (cache_size_ == 0) return false;
  // We don't evict from a full cache unless we are 'forcing'.
  if (force == false && used_slots_ == cache_size_) return false;

  // Grab lock, but first release the other lock held by this thread.  We use
  // the lock inverter to ensure that we never hold two size class locks
  // concurrently.  That can create a deadlock because there is no well
  // defined nesting order.
  LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
  ASSERT(used_slots_ <= cache_size_);
  ASSERT(0 <= cache_size_);
  if (cache_size_ == 0) return false;
  if (used_slots_ == cache_size_) {
    if (force == false) return false;
    // ReleaseListToSpans releases the lock, so we have to make all the
    // updates to the central list before calling it.
    ReleaseListToSpans(tc_slots_[used_slots_].head);
void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
  SpinLockHolder h(&lock_);
  if (N == num_objects_to_move[size_class_] &&
      MakeCacheSpace()) {
    int slot = used_slots_++;
    ASSERT(slot < kNumTransferEntries);
    TCEntry *entry = &tc_slots_[slot];
    entry->head = start;
  ReleaseListToSpans(start);

void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
  SpinLockHolder h(&lock_);
  if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
    int slot = --used_slots_;
    TCEntry *entry = &tc_slots_[slot];
    *start = entry->head;

  // TODO: Prefetch multiple TCEntries?
  void *tail = FetchFromSpansSafe();
    // We are completely out of memory.
    *start = *end = NULL;

  SLL_SetNext(tail, NULL);
  while (count < num) {
    void *t = FetchFromSpans();
void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
  void *t = FetchFromSpans();
    t = FetchFromSpans();

void* TCMalloc_Central_FreeList::FetchFromSpans() {
  if (DLL_IsEmpty(&nonempty_)) return NULL;
  Span* span = nonempty_.next;

  ASSERT(span->objects != NULL);
  ASSERT_SPAN_COMMITTED(span);
  void* result = span->objects;
  span->objects = *(reinterpret_cast<void**>(result));
  if (span->objects == NULL) {
    // Move to empty list
    DLL_Prepend(&empty_, span);
    Event(span, 'E', 0);

// Fetch memory from the system and add to the central cache freelist.
ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
  // Release central list lock while operating on pageheap
  const size_t npages = class_to_pages[size_class_];

    SpinLockHolder h(&pageheap_lock);
    span = pageheap->New(npages);
    if (span) pageheap->RegisterSizeClass(span, size_class_);
    MESSAGE("allocation failed: %d\n", errno);

  ASSERT_SPAN_COMMITTED(span);
  ASSERT(span->length == npages);
  // Cache sizeclass info eagerly.  Locking is not necessary.
  // (Instead of being eager, we could just replace any stale info
  // about this span, but that seems to be no better in practice.)
  for (size_t i = 0; i < npages; i++) {
    pageheap->CacheSizeClass(span->start + i, size_class_);

  // Split the block into pieces and add to the free-list
  // TODO: coloring of objects to avoid cache conflicts?
  void** tail = &span->objects;
  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
  char* limit = ptr + (npages << kPageShift);
  const size_t size = ByteSizeForClass(size_class_);
  while ((nptr = ptr + size) <= limit) {
    tail = reinterpret_cast<void**>(ptr);
  ASSERT(ptr <= limit);
  span->refcount = 0; // No sub-object in use yet

  // Add span to list of non-empty spans
  DLL_Prepend(&nonempty_, span);
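// [Illustrative sketch - not part of the original source.] Populate() above
// threads every size-class-sized chunk of the freshly allocated span into one
// singly-linked list by writing each chunk's address into the previous chunk's
// first word. A standalone model of that carving loop, with hypothetical names:
#if 0 // disabled example
#include <cstddef>

// Splits [block, block + blockBytes) into objectBytes-sized pieces and links
// them through their first word; returns the head of the resulting list.
static void* carveIntoFreeList(char* block, size_t blockBytes, size_t objectBytes)
{
    void* head = 0;
    void** tail = &head;
    char* ptr = block;
    char* limit = block + blockBytes;
    while (ptr + objectBytes <= limit) {
        *tail = ptr;                           // append this chunk to the list
        tail = reinterpret_cast<void**>(ptr);  // its first word will hold the next link
        ptr += objectBytes;
    }
    *tail = 0;                                 // terminate the list
    return head;
}
#endif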
//-------------------------------------------------------------------
// TCMalloc_ThreadCache implementation
//-------------------------------------------------------------------

inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
  if (bytes_until_sample_ < k) {
    PickNextSample(k);
    return true;
  } else {
    bytes_until_sample_ -= k;
    return false;
  }
}

void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
  in_setspecific_ = false;
  for (size_t cl = 0; cl < kNumClasses; ++cl) {

  // Initialize RNG -- run it for a bit to get to good values
  bytes_until_sample_ = 0;
  rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
  for (int i = 0; i < 100; i++) {
    PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));

void TCMalloc_ThreadCache::Cleanup() {
  // Put unused memory back into central cache
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    if (list_[cl].length() > 0) {
      ReleaseToCentralCache(cl, list_[cl].length());

ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
  ASSERT(size <= kMaxSize);
  const size_t cl = SizeClass(size);
  FreeList* list = &list_[cl];
  size_t allocationSize = ByteSizeForClass(cl);
  if (list->empty()) {
    FetchFromCentralCache(cl, allocationSize);
    if (list->empty()) return NULL;
  size_ -= allocationSize;

inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
  size_ += ByteSizeForClass(cl);
  FreeList* list = &list_[cl];
  // If enough data is free, put back into central cache
  if (list->length() > kMaxFreeListLength) {
    ReleaseToCentralCache(cl, num_objects_to_move[cl]);
  if (size_ >= per_thread_cache_size) Scavenge();
// Remove some objects of class "cl" from central cache and add to thread heap
ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
  int fetch_count = num_objects_to_move[cl];
  central_cache[cl].RemoveRange(&start, &end, &fetch_count);
  list_[cl].PushRange(fetch_count, start, end);
  size_ += allocationSize * fetch_count;

// Remove some objects of class "cl" from thread heap and add to central cache
inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
  FreeList* src = &list_[cl];
  if (N > src->length()) N = src->length();
  size_ -= N*ByteSizeForClass(cl);

  // We return prepackaged chains of the correct size to the central cache.
  // TODO: Use the same format internally in the thread caches?
  int batch_size = num_objects_to_move[cl];
  while (N > batch_size) {
    src->PopRange(batch_size, &head, &tail);
    central_cache[cl].InsertRange(head, tail, batch_size);
  src->PopRange(N, &head, &tail);
  central_cache[cl].InsertRange(head, tail, N);

// Release idle memory to the central cache
inline void TCMalloc_ThreadCache::Scavenge() {
  // If the low-water mark for the free list is L, it means we would
  // not have had to allocate anything from the central cache even if
  // we had reduced the free list size by L.  We aim to get closer to
  // that situation by dropping L/2 nodes from the free list.  This
  // may not release much memory, but if so we will call scavenge again
  // pretty soon and the low-water marks will be high on that call.
  //int64 start = CycleClock::Now();

  for (size_t cl = 0; cl < kNumClasses; cl++) {
    FreeList* list = &list_[cl];
    const int lowmark = list->lowwatermark();
    const int drop = (lowmark > 1) ? lowmark/2 : 1;
    ReleaseToCentralCache(cl, drop);
    list->clear_lowwatermark();

  //int64 finish = CycleClock::Now();
  //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
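// [Illustrative sketch - not part of the original source.] The heuristic in
// Scavenge() above uses each list's low-water mark: if a list never dropped
// below L objects since the last scavenge, roughly L/2 of them can be returned
// to the central cache without hurting the allocation fast path. A standalone
// model of that drop policy, with hypothetical names:
#if 0 // disabled example
// Number of objects to return for a list whose low-water mark is lowWater.
static int objectsToDrop(int lowWater)
{
    return lowWater > 1 ? lowWater / 2 : 1;
}
// e.g. a list whose low-water mark is 8 gives back 4 objects; a mark of 1
// still drops a single object so lightly used lists shrink over time too.
#endif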
void TCMalloc_ThreadCache::PickNextSample(size_t k) {
  // Make next "random" number
  // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
  static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
  uint32_t r = rnd_;
  rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);

  // Next point is "rnd_ % (sample_period)".  I.e., average
  // increment is "sample_period/2".
  const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
  static int last_flag_value = -1;

  if (flag_value != last_flag_value) {
    SpinLockHolder h(&sample_period_lock);
    for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
      if (primes_list[i] >= flag_value) {
    sample_period = primes_list[i];
    last_flag_value = flag_value;

  bytes_until_sample_ += rnd_ % sample_period;

  if (k > (static_cast<size_t>(-1) >> 2)) {
    // If the user has asked for a huge allocation then it is possible
    // for the code below to loop infinitely.  Just return (note that
    // this throws off the sampling accuracy somewhat, but a user who
    // is allocating more than 1G of memory at a time can live with a
    // minor inaccuracy in profiling of small allocations, and also
    // would rather not wait for the loop below to terminate).

  while (bytes_until_sample_ < k) {
    // Increase bytes_until_sample_ by enough average sampling periods
    // (sample_period >> 1) to allow us to sample past the current
    bytes_until_sample_ += (sample_period >> 1);

  bytes_until_sample_ -= k;
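// [Illustrative sketch - not part of the original source.] PickNextSample()
// advances rnd_ with a Galois-style linear feedback shift register whose taps
// come from the polynomial named in the comment above (which the comment says
// is primitive, giving a full period of 2^32 - 1 non-zero states); the result
// then chooses the next byte-count sampling point. A self-contained version of
// just the generator step:
#if 0 // disabled example
#include <stdint.h>

// One LFSR step: shift left and, if the top bit fell off, fold in the taps.
static uint32_t lfsrNext(uint32_t r)
{
    const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
    return (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
}
// Usage: seed with any non-zero value, then call repeatedly for a cheap
// pseudo-random stream (no locking, no allocation).
#endif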
void TCMalloc_ThreadCache::InitModule() {
  // There is a slight potential race here because of double-checked
  // locking idiom.  However, as long as the program does a small
  // allocation before switching to multi-threaded mode, we will be
  // fine.  We increase the chances of doing such a small allocation
  // by doing one in the constructor of the module_enter_exit_hook
  // object declared below.
  SpinLockHolder h(&pageheap_lock);
    threadheap_allocator.Init();
    span_allocator.Init();
    span_allocator.New(); // Reduce cache conflicts
    span_allocator.New(); // Reduce cache conflicts
    stacktrace_allocator.Init();
    DLL_Init(&sampled_objects);
    for (size_t i = 0; i < kNumClasses; ++i) {
      central_cache[i].Init(i);
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
    FastMallocZone::init();
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
  // Create the heap and add it to the linked list
  TCMalloc_ThreadCache *heap = threadheap_allocator.New();
  heap->next_ = thread_heaps;
  if (thread_heaps != NULL) thread_heaps->prev_ = heap;
  thread_heaps = heap;
  thread_heap_count++;
  RecomputeThreadCacheSize();

inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
  // __thread is faster, but only when the kernel supports it
  if (KernelSupportsTLS())
    return threadlocal_heap;
#elif COMPILER(MSVC)
    return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
    return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
  TCMalloc_ThreadCache* ptr = NULL;
    ptr = GetThreadHeap();
  if (ptr == NULL) ptr = CreateCacheIfNecessary();

// In deletion paths, we do not try to create a thread-cache.  This is
// because we may be in the thread destruction code and may have
// already cleaned up the cache for this thread.
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
  if (!tsd_inited) return NULL;
  void* const p = GetThreadHeap();
  return reinterpret_cast<TCMalloc_ThreadCache*>(p);
void TCMalloc_ThreadCache::InitTSD() {
  ASSERT(!tsd_inited);
  pthread_key_create(&heap_key, DestroyThreadCache);
  tlsIndex = TlsAlloc();

  // We may have used a fake pthread_t for the main thread.  Fix it.
  memset(&zero, 0, sizeof(zero));
  SpinLockHolder h(&pageheap_lock);
  ASSERT(pageheap_lock.IsHeld());
  for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
      h->tid_ = GetCurrentThreadId();
    if (pthread_equal(h->tid_, zero)) {
      h->tid_ = pthread_self();
TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
  // Initialize per-thread data if necessary
  TCMalloc_ThreadCache* heap = NULL;
    SpinLockHolder h(&pageheap_lock);
      me = GetCurrentThreadId();
    // Early on in glibc's life, we cannot even call pthread_self()
      memset(&me, 0, sizeof(me));
      me = pthread_self();

    // This may be a recursive malloc call from pthread_setspecific()
    // In that case, the heap for this thread has already been created
    // and added to the linked list.  So we search for that first.
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
      if (h->tid_ == me) {
      if (pthread_equal(h->tid_, me)) {

    if (heap == NULL) heap = NewHeap(me);

  // We call pthread_setspecific() outside the lock because it may
  // call malloc() recursively.  The recursive call will never get
  // here again because it will find the already allocated heap in the
  // linked list of heaps.
  if (!heap->in_setspecific_ && tsd_inited) {
    heap->in_setspecific_ = true;
    setThreadHeap(heap);
void TCMalloc_ThreadCache::BecomeIdle() {
  if (!tsd_inited) return;              // No caches yet
  TCMalloc_ThreadCache* heap = GetThreadHeap();
  if (heap == NULL) return;             // No thread cache to remove
  if (heap->in_setspecific_) return;    // Do not disturb the active caller

  heap->in_setspecific_ = true;
  pthread_setspecific(heap_key, NULL);
  // Also update the copy in __thread
  threadlocal_heap = NULL;
  heap->in_setspecific_ = false;
  if (GetThreadHeap() == heap) {
    // Somehow heap got reinstated by a recursive call to malloc
    // from pthread_setspecific.  We give up in this case.

  // We can now get rid of the heap
void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
  // Note that "ptr" cannot be NULL since pthread promises not
  // to invoke the destructor on NULL values, but for safety,
  if (ptr == NULL) return;
  // Prevent fast path of GetThreadHeap() from returning heap.
  threadlocal_heap = NULL;
  DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));

void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
  // Remove all memory from heap
  // Remove from linked list
  SpinLockHolder h(&pageheap_lock);
  if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
  if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
  if (thread_heaps == heap) thread_heaps = heap->next_;
  thread_heap_count--;
  RecomputeThreadCacheSize();
  threadheap_allocator.Delete(heap);
void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
  // Divide available space across threads
  int n = thread_heap_count > 0 ? thread_heap_count : 1;
  size_t space = overall_thread_cache_size / n;

  // Limit to allowed range
  if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
  if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;

  per_thread_cache_size = space;

void TCMalloc_ThreadCache::Print() const {
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    MESSAGE("      %5" PRIuS " : %4d len; %4d lo\n",
            ByteSizeForClass(cl),
            list_[cl].lowwatermark());
// Extract interesting stats
struct TCMallocStats {
  uint64_t system_bytes;        // Bytes alloced from system
  uint64_t thread_bytes;        // Bytes in thread caches
  uint64_t central_bytes;       // Bytes in central cache
  uint64_t transfer_bytes;      // Bytes in central transfer cache
  uint64_t pageheap_bytes;      // Bytes in page heap
  uint64_t metadata_bytes;      // Bytes alloced for metadata
// Get stats into "r".  Also get per-size-class counts if class_count != NULL
static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
  r->central_bytes = 0;
  r->transfer_bytes = 0;
  for (int cl = 0; cl < kNumClasses; ++cl) {
    const int length = central_cache[cl].length();
    const int tc_length = central_cache[cl].tc_length();
    r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
    r->transfer_bytes +=
      static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
    if (class_count) class_count[cl] = length + tc_length;

  // Add stats from per-thread heaps
  r->thread_bytes = 0;
    SpinLockHolder h(&pageheap_lock);
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
      r->thread_bytes += h->Size();
        for (size_t cl = 0; cl < kNumClasses; ++cl) {
          class_count[cl] += h->freelist_length(cl);

    SpinLockHolder h(&pageheap_lock);
    r->system_bytes = pageheap->SystemBytes();
    r->metadata_bytes = metadata_system_bytes;
    r->pageheap_bytes = pageheap->FreeBytes();
// WRITE stats to "out"
static void DumpStats(TCMalloc_Printer* out, int level) {
  TCMallocStats stats;
  uint64_t class_count[kNumClasses];
  ExtractStats(&stats, (level >= 2 ? class_count : NULL));

    out->printf("------------------------------------------------\n");
    uint64_t cumulative = 0;
    for (int cl = 0; cl < kNumClasses; ++cl) {
      if (class_count[cl] > 0) {
        uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
        cumulative += class_bytes;
        out->printf("class %3d [ %8" PRIuS " bytes ] : "
                    "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
                    cl, ByteSizeForClass(cl),
                    class_bytes / 1048576.0,
                    cumulative / 1048576.0);

    SpinLockHolder h(&pageheap_lock);
    pageheap->Dump(out);

  const uint64_t bytes_in_use = stats.system_bytes
                                - stats.pageheap_bytes
                                - stats.central_bytes
                                - stats.transfer_bytes
                                - stats.thread_bytes;

  out->printf("------------------------------------------------\n"
              "MALLOC: %12" PRIu64 " Heap size\n"
              "MALLOC: %12" PRIu64 " Bytes in use by application\n"
              "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
              "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
              "MALLOC: %12" PRIu64 " Spans in use\n"
              "MALLOC: %12" PRIu64 " Thread heaps in use\n"
              "MALLOC: %12" PRIu64 " Metadata allocated\n"
              "------------------------------------------------\n",
              stats.pageheap_bytes,
              stats.central_bytes,
              stats.transfer_bytes,
              uint64_t(span_allocator.inuse()),
              uint64_t(threadheap_allocator.inuse()),
              stats.metadata_bytes);
static void PrintStats(int level) {
  const int kBufferSize = 16 << 10;
  char* buffer = new char[kBufferSize];
  TCMalloc_Printer printer(buffer, kBufferSize);
  DumpStats(&printer, level);
  write(STDERR_FILENO, buffer, strlen(buffer));
static void** DumpStackTraces() {
  // Count how much space we need
  int needed_slots = 0;
    SpinLockHolder h(&pageheap_lock);
    for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
      StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
      needed_slots += 3 + stack->depth;
    needed_slots += 100;            // Slop in case sample grows
    needed_slots += needed_slots/8; // An extra 12.5% slop

  void** result = new void*[needed_slots];
  if (result == NULL) {
    MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",

  SpinLockHolder h(&pageheap_lock);
  for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
    ASSERT(used_slots < needed_slots);  // Need to leave room for terminator
    StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
    if (used_slots + 3 + stack->depth >= needed_slots) {
    result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
    result[used_slots+1] = reinterpret_cast<void*>(stack->size);
    result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
    for (int d = 0; d < stack->depth; d++) {
      result[used_slots+3+d] = stack->stack[d];
    used_slots += 3 + stack->depth;
  result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
// TCMalloc's support for extra malloc interfaces
class TCMallocImplementation : public MallocExtension {
  virtual void GetStats(char* buffer, int buffer_length) {
    ASSERT(buffer_length > 0);
    TCMalloc_Printer printer(buffer, buffer_length);

    // Print level one stats unless lots of space is available
    if (buffer_length < 10000) {
      DumpStats(&printer, 1);
      DumpStats(&printer, 2);

  virtual void** ReadStackTraces() {
    return DumpStackTraces();

  virtual bool GetNumericProperty(const char* name, size_t* value) {
    ASSERT(name != NULL);

    if (strcmp(name, "generic.current_allocated_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes
               - stats.thread_bytes
               - stats.central_bytes
               - stats.pageheap_bytes;

    if (strcmp(name, "generic.heap_size") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes;

    if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
      // We assume that bytes in the page heap are not fragmented too
      // badly, and are therefore available for allocation.
      SpinLockHolder l(&pageheap_lock);
      *value = pageheap->FreeBytes();

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(&pageheap_lock);
      *value = overall_thread_cache_size;

    if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.thread_bytes;

  virtual bool SetNumericProperty(const char* name, size_t value) {
    ASSERT(name != NULL);

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      // Clip the value to a reasonable range
      if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
      if (value > (1<<30)) value = (1<<30);     // Limit to 1GB

      SpinLockHolder l(&pageheap_lock);
      overall_thread_cache_size = static_cast<size_t>(value);
      TCMalloc_ThreadCache::RecomputeThreadCacheSize();

  virtual void MarkThreadIdle() {
    TCMalloc_ThreadCache::BecomeIdle();

  virtual void ReleaseFreeMemory() {
    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
// The constructor allocates an object to ensure that initialization
// runs before main(), and therefore we do not have a chance to become
// multi-threaded before initialization.  We also create the TSD key
// here.  Presumably by the time this constructor runs, glibc is in
// good enough shape to handle pthread_key_create().
//
// The constructor also takes the opportunity to tell STL to use
// tcmalloc.  We want to do this early, before construct time, so
// all user STL allocations go through tcmalloc (which works really
//
// The destructor prints stats when the program exits.
class TCMallocGuard {
#ifdef HAVE_TLS    // this is true if the cc/ld/libc combo support TLS
    // Check whether the kernel also supports TLS (needs to happen at runtime)
    CheckIfKernelSupportsTLS();
#ifdef WIN32       // patch the windows VirtualAlloc, etc.
    PatchWindowsFunctions();    // defined in windows/patch_functions.cc
    TCMalloc_ThreadCache::InitTSD();
    MallocExtension::Register(new TCMallocImplementation);
      const char* env = getenv("MALLOCSTATS");
        int level = atoi(env);
        if (level < 1) level = 1;
      UnpatchWindowsFunctions();

static TCMallocGuard module_enter_exit_hook;
//-------------------------------------------------------------------
// Helpers for the exported routines below
//-------------------------------------------------------------------

static Span* DoSampledAllocation(size_t size) {
  // Grab the stack trace outside the heap lock
  tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);

  SpinLockHolder h(&pageheap_lock);
  Span *span = pageheap->New(pages(size == 0 ? 1 : size));

  // Allocate stack trace
  StackTrace *stack = stacktrace_allocator.New();
  if (stack == NULL) {
    // Sampling failed because of lack of memory

  span->objects = stack;
  DLL_Prepend(&sampled_objects, span);

static inline bool CheckCachedSizeClass(void *ptr) {
  PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cached_value = pageheap->GetSizeClassIfCached(p);
  return cached_value == 0 ||
      cached_value == pageheap->GetDescriptor(p)->sizeclass;

static inline void* CheckedMallocResult(void *result)
  ASSERT(result == 0 || CheckCachedSizeClass(result));

static inline void* SpanToMallocResult(Span *span) {
  ASSERT_SPAN_COMMITTED(span);
  pageheap->CacheSizeClass(span->start, 0);
      CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
template <bool crashOnFailure>
static ALWAYS_INLINE void* do_malloc(size_t size) {
  ASSERT(!isForbidden());

  // The following call forces module initialization
  TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
  if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
    Span* span = DoSampledAllocation(size);
      ret = SpanToMallocResult(span);
  if (size > kMaxSize) {
    // Use page-level allocator
    SpinLockHolder h(&pageheap_lock);
    Span* span = pageheap->New(pages(size));
      ret = SpanToMallocResult(span);
    // The common case, and also the simplest.  This just pops the
    // size-appropriate freelist, after replenishing it if it's empty.
    ret = CheckedMallocResult(heap->Allocate(size));
  if (crashOnFailure) // This branch should be optimized out by the compiler.
static ALWAYS_INLINE void do_free(void* ptr) {
  if (ptr == NULL) return;
  ASSERT(pageheap != NULL);  // Should not call free() before malloc()
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cl = pageheap->GetSizeClassIfCached(p);
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
#ifndef NO_TCMALLOC_SAMPLES
    ASSERT(!pageheap->GetDescriptor(p)->sample);
    TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
      heap->Deallocate(ptr, cl);
      // Delete directly into central cache
      SLL_SetNext(ptr, NULL);
      central_cache[cl].InsertRange(ptr, ptr, 1);
    SpinLockHolder h(&pageheap_lock);
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span != NULL && span->start == p);
#ifndef NO_TCMALLOC_SAMPLES
      stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
      span->objects = NULL;
    pageheap->Delete(span);
// For use by exported routines below that want specific alignments
//
// Note: this code can be slow, and can significantly fragment memory.
// The expectation is that memalign/posix_memalign/valloc/pvalloc will
// not be invoked very often.  This requirement simplifies our
// implementation and allows us to tune for expected allocation
static void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();

  // Allocate at least one byte to avoid boundary conditions below
  if (size == 0) size = 1;

  if (size <= kMaxSize && align < kPageSize) {
    // Search through acceptable size classes looking for one with
    // enough alignment.  This depends on the fact that
    // InitSizeClasses() currently produces several size classes that
    // are aligned at powers of two.  We will waste time and space if
    // we miss in the size class array, but that is deemed acceptable
    // since memalign() should be used rarely.
    size_t cl = SizeClass(size);
    while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
    if (cl < kNumClasses) {
      TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
      return CheckedMallocResult(heap->Allocate(class_to_size[cl]));

  // We will allocate directly from the page heap
  SpinLockHolder h(&pageheap_lock);

  if (align <= kPageSize) {
    // Any page-level allocation will be fine
    // TODO: We could put the rest of this page in the appropriate
    // TODO: cache but it does not seem worth it.
    Span* span = pageheap->New(pages(size));
    return span == NULL ? NULL : SpanToMallocResult(span);

  // Allocate extra pages and carve off an aligned portion
  const Length alloc = pages(size + align);
  Span* span = pageheap->New(alloc);
  if (span == NULL) return NULL;

  // Skip starting portion so that we end up aligned
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
  ASSERT(skip < alloc);
    Span* rest = pageheap->Split(span, skip);
    pageheap->Delete(span);

  // Skip trailing portion that we do not need to return
  const Length needed = pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    Span* trailer = pageheap->Split(span, needed);
    pageheap->Delete(trailer);
  return SpanToMallocResult(span);
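// [Illustrative sketch - not part of the original source.] For large
// alignments do_memalign() above over-allocates by "align" worth of pages and
// then splits off the misaligned head and the unneeded tail. A small model of
// the same over-allocate-and-trim idea in plain byte arithmetic, with
// hypothetical names (unlike the real code, which returns the trimmed spans to
// the page heap, this sketch cannot free the skipped head separately):
#if 0 // disabled example
#include <stdint.h>
#include <stdlib.h>

// Returns a pointer aligned to 'alignment' (a power of two) by over-allocating;
// *rawOut receives the raw pointer that must be passed to free().
static void* alignedWithinOverAllocation(size_t size, size_t alignment, void** rawOut)
{
    void* raw = malloc(size + alignment);
    if (!raw)
        return 0;
    uintptr_t p = reinterpret_cast<uintptr_t>(raw);
    uintptr_t aligned = (p + alignment - 1) & ~(uintptr_t(alignment) - 1);  // round up
    *rawOut = raw;
    return reinterpret_cast<void*>(aligned);
}
#endif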
// Helpers for use by exported routines below:

static inline void do_malloc_stats() {

static inline int do_mallopt(int, int) {
  return 1;     // Indicates error

#ifdef HAVE_STRUCT_MALLINFO  // mallinfo isn't defined on freebsd, for instance
static inline struct mallinfo do_mallinfo() {
  TCMallocStats stats;
  ExtractStats(&stats, NULL);

  // Just some of the fields are filled in.
  struct mallinfo info;
  memset(&info, 0, sizeof(info));

  // Unfortunately, the struct contains "int" field, so some of the
  // size values will be truncated.
  info.arena     = static_cast<int>(stats.system_bytes);
  info.fsmblks   = static_cast<int>(stats.thread_bytes
                                    + stats.central_bytes
                                    + stats.transfer_bytes);
  info.fordblks  = static_cast<int>(stats.pageheap_bytes);
  info.uordblks  = static_cast<int>(stats.system_bytes
                                    - stats.thread_bytes
                                    - stats.central_bytes
                                    - stats.transfer_bytes
                                    - stats.pageheap_bytes);
//-------------------------------------------------------------------
// Exported routines
//-------------------------------------------------------------------

// CAVEAT: The code structure below ensures that MallocHook methods are always
//         called from the stack frame of the invoked allocation function.
//         heap-checker.cc depends on this to start a stack trace from
//         the call to the (de)allocation function.

#define do_malloc do_malloc<crashOnFailure>

template <bool crashOnFailure>
void* malloc(size_t);

void* fastMalloc(size_t size)
    return malloc<true>(size);

void* tryFastMalloc(size_t size)
    return malloc<false>(size);

template <bool crashOnFailure>
void* malloc(size_t size) {
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= size)  // If overflow would occur...
    size += sizeof(AllocAlignmentInteger);
    void* result = do_malloc(size);
    *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
    result = static_cast<AllocAlignmentInteger*>(result) + 1;
    void* result = do_malloc(size);
    MallocHook::InvokeNewHook(result, size);
void free(void* ptr) {
    MallocHook::InvokeDeleteHook(ptr);
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(ptr);
    if (*header != Internal::AllocTypeMalloc)
        Internal::fastMallocMatchFailed(ptr);
template <bool crashOnFailure>
void* calloc(size_t, size_t);

void* fastCalloc(size_t n, size_t elem_size)
    return calloc<true>(n, elem_size);

void* tryFastCalloc(size_t n, size_t elem_size)
    return calloc<false>(n, elem_size);

template <bool crashOnFailure>
void* calloc(size_t n, size_t elem_size) {
    size_t totalBytes = n * elem_size;

    // Protect against overflow
    if (n > 1 && elem_size && (totalBytes / elem_size) != n)

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes)  // If overflow would occur...
    totalBytes += sizeof(AllocAlignmentInteger);
    void* result = do_malloc(totalBytes);
    memset(result, 0, totalBytes);
    *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
    result = static_cast<AllocAlignmentInteger*>(result) + 1;
    void* result = do_malloc(totalBytes);
    if (result != NULL) {
        memset(result, 0, totalBytes);
    MallocHook::InvokeNewHook(result, totalBytes);
// Since cfree isn't used anywhere, we don't compile it in.
void cfree(void* ptr) {
    MallocHook::InvokeDeleteHook(ptr);
template <bool crashOnFailure>
void* realloc(void*, size_t);

void* fastRealloc(void* old_ptr, size_t new_size)
    return realloc<true>(old_ptr, new_size);

void* tryFastRealloc(void* old_ptr, size_t new_size)
    return realloc<false>(old_ptr, new_size);

template <bool crashOnFailure>
void* realloc(void* old_ptr, size_t new_size) {
    if (old_ptr == NULL) {
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
        void* result = malloc(new_size);
        void* result = do_malloc(new_size);
        MallocHook::InvokeNewHook(result, new_size);
    if (new_size == 0) {
        MallocHook::InvokeDeleteHook(old_ptr);

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= new_size)  // If overflow would occur...
    new_size += sizeof(AllocAlignmentInteger);
    AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(old_ptr);
    if (*header != Internal::AllocTypeMalloc)
        Internal::fastMallocMatchFailed(old_ptr);

    // Get the size of the old entry
    const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
    size_t cl = pageheap->GetSizeClassIfCached(p);
        span = pageheap->GetDescriptor(p);
        cl = span->sizeclass;
        pageheap->CacheSizeClass(p, cl);
        old_size = ByteSizeForClass(cl);
        ASSERT(span != NULL);
        old_size = span->length << kPageShift;

    // Reallocate if the new size is larger than the old size,
    // or if the new size is significantly smaller than the old size.
    if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
        // Need to reallocate
        void* new_ptr = do_malloc(new_size);
        if (new_ptr == NULL) {
        MallocHook::InvokeNewHook(new_ptr, new_size);
        memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
        MallocHook::InvokeDeleteHook(old_ptr);
        // We could use a variant of do_free() that leverages the fact
        // that we already know the sizeclass of old_ptr.  The benefit
        // would be small, so don't bother.
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
        new_ptr = static_cast<AllocAlignmentInteger*>(new_ptr) + 1;
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
        old_ptr = pByte + sizeof(AllocAlignmentInteger);  // Set old_ptr back to the user pointer.
static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;

static inline void* cpp_alloc(size_t size, bool nothrow) {
    void* p = do_malloc(size);
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      // If no new_handler is established, the allocation failed.
        if (nothrow) return 0;
        throw std::bad_alloc();
      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // if it throws something else, don't interfere.
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
    } else {  // allocation success
void* operator new(size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);

void* operator new(size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);

void operator delete(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);

void operator delete(void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);

void* operator new[](size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);

void* operator new[](size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);

void operator delete[](void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);

void operator delete[](void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
extern "C" void* memalign(size_t align, size_t size) __THROW {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
    __THROW {
  if (((align % sizeof(void*)) != 0) ||
      ((align & (align - 1)) != 0) ||
      (align == 0)) {
    return EINVAL;
  }

  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  if (result == NULL) {
    return ENOMEM;
  } else {
    *result_ptr = result;
    return 0;
  }
}
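// Illustrative sketch (not part of the original source): posix_memalign()
// succeeds only when the requested alignment is a power of two and a multiple
// of sizeof(void*); otherwise it reports EINVAL without touching *result_ptr.
//
//     void* buffer = 0;
//     if (posix_memalign(&buffer, 64, 4096) == 0) {   // 64 is a valid alignment
//         // ... use the 64-byte-aligned buffer ...
//         free(buffer);
//     }
//     // posix_memalign(&buffer, 3, 4096) would return EINVAL: 3 is not a power of two.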
static size_t pagesize = 0;

extern "C" void* valloc(size_t size) __THROW {
  // Allocate page-aligned object of length >= size bytes
  if (pagesize == 0) pagesize = getpagesize();
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
extern "C" void* pvalloc(size_t size) __THROW {
  // Round up size to a multiple of pagesize
  if (pagesize == 0) pagesize = getpagesize();
  size = (size + pagesize - 1) & ~(pagesize - 1);
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
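// Worked example (added for clarity, not in the original source): the
// expression (size + pagesize - 1) & ~(pagesize - 1) rounds up to the next
// multiple of a power-of-two page size. With pagesize == 4096:
//
//     size == 1    -> (1 + 4095) & ~4095 == 4096
//     size == 4096 -> (4096 + 4095) & ~4095 == 4096
//     size == 4097 -> (4097 + 4095) & ~4095 == 8192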
extern "C" void malloc_stats(void) {
  do_malloc_stats();
}

extern "C" int mallopt(int cmd, int value) {
  return do_mallopt(cmd, value);
}
#ifdef HAVE_STRUCT_MALLINFO
extern "C" struct mallinfo mallinfo(void) {
  return do_mallinfo();
}
#endif

//-------------------------------------------------------------------
// Some library routines on RedHat 9 allocate memory using malloc()
// and free it using __libc_free() (or vice-versa).  Since we provide
// our own implementations of malloc/free, we need to make sure that
// the __libc_XXX variants (defined as part of glibc) also point to
// the same implementations.
//-------------------------------------------------------------------
#if defined(__GLIBC__)
extern "C" {
#if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
  // Potentially faster variants that use the gcc alias extension.
  // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
# define ALIAS(x) __attribute__ ((weak, alias (x)))
  void* __libc_malloc(size_t size)              ALIAS("malloc");
  void  __libc_free(void* ptr)                  ALIAS("free");
  void* __libc_realloc(void* ptr, size_t size)  ALIAS("realloc");
  void* __libc_calloc(size_t n, size_t size)    ALIAS("calloc");
  void  __libc_cfree(void* ptr)                 ALIAS("cfree");
  void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
  void* __libc_valloc(size_t size)              ALIAS("valloc");
  void* __libc_pvalloc(size_t size)             ALIAS("pvalloc");
  int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
# undef ALIAS
# else   /* not __GNUC__ */
  // Portable wrappers
  void* __libc_malloc(size_t size)              { return malloc(size);       }
  void  __libc_free(void* ptr)                  { free(ptr);                 }
  void* __libc_realloc(void* ptr, size_t size)  { return realloc(ptr, size); }
  void* __libc_calloc(size_t n, size_t size)    { return calloc(n, size);    }
  void  __libc_cfree(void* ptr)                 { cfree(ptr);                }
  void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
  void* __libc_valloc(size_t size)              { return valloc(size);       }
  void* __libc_pvalloc(size_t size)             { return pvalloc(size);      }
  int __posix_memalign(void** r, size_t a, size_t s) {
    return posix_memalign(r, a, s);
  }
# endif  /* __GNUC__ */
}
#endif   /* __GLIBC__ */
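// Illustrative note (added for clarity, not in the original source): with the
// gcc alias extension, the preprocessor turns a declaration such as
//
//     void* __libc_malloc(size_t size) ALIAS("malloc");
//
// into
//
//     void* __libc_malloc(size_t size) __attribute__ ((weak, alias ("malloc")));
//
// so __libc_malloc becomes a weak symbol resolving to this file's malloc, with
// no separate wrapper function or extra call emitted.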
// Override __libc_memalign in libc on linux boxes specially.
// They have a bug in libc that causes them to (very rarely) allocate
// with __libc_memalign() yet deallocate with free() and the
// definitions above don't catch it.
// This function is an exception to the rule of calling MallocHook method
// from the stack frame of the allocation function;
// heap-checker handles this special case explicitly.
static void *MemalignOverride(size_t align, size_t size, const void *caller)
    __THROW {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)

class FreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    HashSet<void*> m_freeObjects;

public:
    FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }

    void visit(void* ptr) { m_freeObjects.add(ptr); }
    bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
    bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
    size_t freeObjectCount() const { return m_freeObjects.size(); }

    void findFreeObjects(TCMalloc_ThreadCache* threadCache)
    {
        for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
            threadCache->enumerateFreeObjects(*this, m_reader);
    }

    void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
    {
        for (unsigned i = 0; i < numSizes; i++)
            centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
    }
};
class PageMapFreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    FreeObjectFinder& m_freeObjectFinder;

public:
    PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
        : m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    int visit(void* ptr) const
    {
        if (!ptr)
            return 1;

        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (span->free) {
            void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
            m_freeObjectFinder.visit(ptr);
        } else if (span->sizeclass) {
            // Walk the free list of the small-object span, keeping track of each object seen
            for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject)))
                m_freeObjectFinder.visit(nextObject);
        }
        return span->length;
    }
};
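// Illustrative note (added for clarity, not in the original source): TCMalloc
// threads a span's free list through the free objects themselves, so the first
// word of each free object holds the address of the next free object (or 0 at
// the end). Because the span lives in another process here, each hop goes
// through m_reader to copy that word into this process before following it.
//
//     freeObject0 -> [ next = freeObject1 ] -> ... -> freeObjectN -> [ next = 0 ]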
class PageMapMemoryUsageRecorder {
    task_t m_task;
    void* m_context;
    unsigned m_typeMask;
    vm_range_recorder_t* m_recorder;
    const RemoteMemoryReader& m_reader;
    const FreeObjectFinder& m_freeObjectFinder;

    HashSet<void*> m_seenPointers;
    Vector<Span*> m_coalescedSpans;

public:
    PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
        : m_task(task)
        , m_context(context)
        , m_typeMask(typeMask)
        , m_recorder(recorder)
        , m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    ~PageMapMemoryUsageRecorder()
    {
        ASSERT(!m_coalescedSpans.size());
    }
    void recordPendingRegions()
    {
        Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
        vm_range_t ptrRange = { m_coalescedSpans[0]->start << kPageShift, 0 };
        ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize);

        // Mark the memory region the spans represent as a candidate for containing pointers
        if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE)
            (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);

        if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
            m_coalescedSpans.clear();
            return;
        }

        Vector<vm_range_t, 1024> allocatedPointers;
        for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
            Span* theSpan = m_coalescedSpans[i];
            if (theSpan->free)
                continue;

            vm_address_t spanStartAddress = theSpan->start << kPageShift;
            vm_size_t spanSizeInBytes = theSpan->length * kPageSize;

            if (!theSpan->sizeclass) {
                // If it's an allocated large object span, mark it as in use
                if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
                    allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
            } else {
                const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);

                // Mark each allocated small object within the span as in use
                const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
                for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
                    if (!m_freeObjectFinder.isFreeObject(object))
                        allocatedPointers.append((vm_range_t){object, objectSize});
                }
            }
        }

        (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());

        m_coalescedSpans.clear();
    }
    int visit(void* ptr)
    {
        if (!ptr)
            return 1;

        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (!span->start)
            return 1;

        if (m_seenPointers.contains(ptr))
            return span->length;
        m_seenPointers.add(ptr);

        if (!m_coalescedSpans.size()) {
            m_coalescedSpans.append(span);
            return span->length;
        }

        Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
        vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
        vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;

        // If the new span is adjacent to the previous span, do nothing for now.
        vm_address_t spanStartAddress = span->start << kPageShift;
        if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
            m_coalescedSpans.append(span);
            return span->length;
        }

        // New span is not adjacent to previous span, so record the spans coalesced so far.
        recordPendingRegions();
        m_coalescedSpans.append(span);

        return span->length;
    }
};
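// Illustrative note (added for clarity, not in the original source): the
// recorder batches physically contiguous spans and reports them as one
// pointer-containing region. For example, spans covering pages [100..103] and
// [104..105] are coalesced into a single vm_range_t starting at
// 100 << kPageShift with size 6 * kPageSize, while a later span starting at
// page 110 forces recordPendingRegions() to flush the pending run first.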
class AdminRegionRecorder {
    task_t m_task;
    void* m_context;
    unsigned m_typeMask;
    vm_range_recorder_t* m_recorder;
    const RemoteMemoryReader& m_reader;

    Vector<vm_range_t, 1024> m_pendingRegions;

public:
    AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader)
        : m_task(task)
        , m_context(context)
        , m_typeMask(typeMask)
        , m_recorder(recorder)
        , m_reader(reader)
    { }

    void recordRegion(vm_address_t ptr, size_t size)
    {
        if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
            m_pendingRegions.append((vm_range_t){ ptr, size });
    }

    void visit(void* ptr, size_t size)
    {
        recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
    }

    void recordPendingRegions()
    {
        if (m_pendingRegions.size()) {
            (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
            m_pendingRegions.clear();
        }
    }

    ~AdminRegionRecorder()
    {
        ASSERT(!m_pendingRegions.size());
    }
};
kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
{
    RemoteMemoryReader memoryReader(task, reader);

    FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
    TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
    TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
    TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);

    TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);

    FreeObjectFinder finder(memoryReader);
    finder.findFreeObjects(threadHeaps);
    finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);

    TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
    PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
    pageMap->visitValues(pageMapFinder, memoryReader);

    PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
    pageMap->visitValues(usageRecorder, memoryReader);
    usageRecorder.recordPendingRegions();

    AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder, memoryReader);
    pageMap->visitAllocations(adminRegionRecorder, memoryReader);

    PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
    PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);

    spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
    pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);

    adminRegionRecorder.recordPendingRegions();

    return 0;
}
size_t FastMallocZone::size(malloc_zone_t*, const void*)
{
    return 0;
}

void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
{
    return 0;
}

void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
{
    return 0;
}

void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
{
    // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
    // is not in this zone.  When this happens, the pointer being freed was not allocated by any
    // zone so we need to print a useful error for the application developer.
    malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
}

void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
{
    return 0;
}

malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
    &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics

#if !PLATFORM(IPHONE_SIMULATOR)
    , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
#endif

    };
FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
    : m_pageHeap(pageHeap)
    , m_threadHeaps(threadHeaps)
    , m_centralCaches(centralCaches)
    , m_spanAllocator(spanAllocator)
    , m_pageHeapAllocator(pageHeapAllocator)
{
    memset(&m_zone, 0, sizeof(m_zone));
    m_zone.zone_name = "JavaScriptCore FastMalloc";
    m_zone.size = &FastMallocZone::size;
    m_zone.malloc = &FastMallocZone::zoneMalloc;
    m_zone.calloc = &FastMallocZone::zoneCalloc;
    m_zone.realloc = &FastMallocZone::zoneRealloc;
    m_zone.free = &FastMallocZone::zoneFree;
    m_zone.valloc = &FastMallocZone::zoneValloc;
    m_zone.destroy = &FastMallocZone::zoneDestroy;
    m_zone.introspect = &jscore_fastmalloc_introspection;
    malloc_zone_register(&m_zone);
}
void FastMallocZone::init()
{
    static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
}
void releaseFastMallocFreeMemory()
{
    // Flush free pages in the current thread cache back to the page heap.
    // The low watermark mechanism in Scavenge() prevents a full return on the
    // first pass; the second pass flushes everything.
    if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
        threadCache->Scavenge();
        threadCache->Scavenge();
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
}
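// Illustrative sketch (not part of the original source): a client would call
// this after dropping large caches, so freed objects actually leave the thread
// caches and the page heap instead of lingering in the allocator.
//
//     void handleMemoryPressure()
//     {
//         clearApplicationCaches();          // hypothetical client-side purge
//         WTF::releaseFastMallocFreeMemory();
//     }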
FastMallocStatistics fastMallocStatistics()
{
    FastMallocStatistics statistics;
    {
        SpinLockHolder lockHolder(&pageheap_lock);
        statistics.heapSize = static_cast<size_t>(pageheap->SystemBytes());
        statistics.freeSizeInHeap = static_cast<size_t>(pageheap->FreeBytes());
        statistics.returnedSize = pageheap->ReturnedBytes();
        statistics.freeSizeInCaches = 0;
        for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache; threadCache = threadCache->next_)
            statistics.freeSizeInCaches += threadCache->Size();
    }
    for (unsigned cl = 0; cl < kNumClasses; ++cl) {
        const int length = central_cache[cl].length();
        const int tc_length = central_cache[cl].tc_length();
        statistics.freeSizeInCaches += ByteSizeForClass(cl) * (length + tc_length);
    }

    return statistics;
}
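// Illustrative sketch (not part of the original source): a caller can use
// these numbers to distinguish memory still held by the allocator from memory
// already returned to the system.
//
//     WTF::FastMallocStatistics stats = WTF::fastMallocStatistics();
//     size_t reusableButUnused = stats.freeSizeInHeap + stats.freeSizeInCaches;
//     size_t footprintEstimate = stats.heapSize - stats.returnedSize;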
#endif // FORCE_SYSTEM_MALLOC