1 // Copyright (c) 2005, 2007, Google Inc.
2 // All rights reserved.
3 // Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
5 // Redistribution and use in source and binary forms, with or without
6 // modification, are permitted provided that the following conditions are
9 // * Redistributions of source code must retain the above copyright
10 // notice, this list of conditions and the following disclaimer.
11 // * Redistributions in binary form must reproduce the above
12 // copyright notice, this list of conditions and the following disclaimer
13 // in the documentation and/or other materials provided with the
15 // * Neither the name of Google Inc. nor the names of its
16 // contributors may be used to endorse or promote products derived from
17 // this software without specific prior written permission.
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 // Author: Sanjay Ghemawat <opensource@google.com>
34 // A malloc that uses a per-thread cache to satisfy small malloc requests.
35 // (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
37 // See doc/tcmalloc.html for a high-level
38 // description of how this malloc works.
41 // 1. The thread-specific lists are accessed without acquiring any locks.
42 // This is safe because each such list is only accessed by one thread.
43 // 2. We have a lock per central free-list, and hold it while manipulating
44 // the central free list for a particular size.
45 // 3. The central page allocator is protected by "pageheap_lock".
46 // 4. The pagemap (which maps from page-number to descriptor),
47 // can be read without holding any locks, and written while holding
48 // the "pageheap_lock".
49 // 5. To improve performance, a subset of the information one can get
50 // from the pagemap is cached in a data structure, pagemap_cache_,
51 // that atomically reads and writes its entries. This cache can be
52 // read and written without locking.
54 // This multi-threaded access to the pagemap is safe for fairly
55 // subtle reasons. We basically assume that when an object X is
56 // allocated by thread A and deallocated by thread B, there must
57 // have been appropriate synchronization in the handoff of object
58 // X from thread A to thread B. The same logic applies to pagemap_cache_.
60 // THE PAGEID-TO-SIZECLASS CACHE
61 // Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
62 // returns 0 for a particular PageID then that means "no information," not that
63 // the sizeclass is 0. The cache may have stale information for pages that do
64 // not hold the beginning of any free()'able object. Staleness is eliminated
65 // in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
66 // do_memalign() for all other relevant pages.
68 // TODO: Bias reclamation to larger addresses
69 // TODO: implement mallinfo/mallopt
70 // TODO: Better testing
72 // 9/28/2003 (new page-level allocator replaces ptmalloc2):
73 // * malloc/free of small objects goes from ~300 ns to ~50 ns.
74 // * allocation of a reasonably complicated struct
75 // goes from about 1100 ns to about 300 ns.
78 #include "FastMalloc.h"
80 #include "Assertions.h"
82 #if ENABLE(JSC_MULTIPLE_THREADS)
86 #ifndef NO_TCMALLOC_SAMPLES
88 #define NO_TCMALLOC_SAMPLES
92 #if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC) && defined(NDEBUG)
93 #define FORCE_SYSTEM_MALLOC 0
95 #define FORCE_SYSTEM_MALLOC 1
98 // Use a background thread to periodically scavenge memory to release back to the system
99 // https://bugs.webkit.org/show_bug.cgi?id=27900: don't turn this on for Tiger until we have figured out why it caused a crash.
100 #define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 0
105 #if ENABLE(JSC_MULTIPLE_THREADS)
106 static pthread_key_t isForbiddenKey;
107 static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
108 static void initializeIsForbiddenKey()
110     pthread_key_create(&isForbiddenKey, 0);
114 static bool isForbidden()
116     pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
117     return !!pthread_getspecific(isForbiddenKey);
121 void fastMallocForbid()
123     pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
124     pthread_setspecific(isForbiddenKey, &isForbiddenKey);
127 void fastMallocAllow()
129     pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
130     pthread_setspecific(isForbiddenKey, 0);
135 static bool staticIsForbidden;
136 static bool isForbidden()
138     return staticIsForbidden;
141 void fastMallocForbid()
143     staticIsForbidden = true;
146 void fastMallocAllow()
148     staticIsForbidden = false;
150 #endif // ENABLE(JSC_MULTIPLE_THREADS)
159 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
163 void fastMallocMatchFailed(void*)
168 } // namespace Internal
172 void* fastZeroedMalloc(size_t n)
174     void* result = fastMalloc(n);
175     memset(result, 0, n);
179 char* fastStrDup(const char* src)
181     int len = strlen(src) + 1;
182     char* dup = static_cast<char*>(fastMalloc(len));
185     memcpy(dup, src, len);
190 TryMallocReturnValue tryFastZeroedMalloc(size_t n)
193     if (!tryFastMalloc(n).getValue(result))
195     memset(result, 0, n);
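// A caller-side sketch of the TryMallocReturnValue idiom used above. Illustrative
// only and kept out of the build with #if 0; "exampleTryAllocate", "buffer" and
// "bufferSize" are made-up names, not WebKit API.
#if 0
static bool exampleTryAllocate(size_t bufferSize, void*& buffer)
{
    // getValue() reports false when the allocation failed, so callers can bail
    // out without ever touching a null pointer.
    if (!tryFastMalloc(bufferSize).getValue(buffer))
        return false;
    memset(buffer, 0, bufferSize);   // same zero-fill step tryFastZeroedMalloc performs
    return true;
}
#endif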
201 #if FORCE_SYSTEM_MALLOC
205 TryMallocReturnValue tryFastMalloc(size_t n)
207     ASSERT(!isForbidden());
209 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
210     if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n)  // If overflow would occur...
213     void* result = malloc(n + sizeof(AllocAlignmentInteger));
217     *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
218     result = static_cast<AllocAlignmentInteger*>(result) + 1;
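// Illustrative sketch (not part of TCMalloc/WTF, guarded with #if 0): the general
// "hidden header" technique the validation code above relies on. A tag word is
// placed in front of the user block, the caller sees the address just past the
// tag, and the matching free walks back one word to validate it. All names
// ("ExampleTag", "exampleTaggedMalloc", ...) are hypothetical.
#if 0
#include <cassert>
#include <cstdint>
#include <cstdlib>

typedef uintptr_t ExampleTag;                   // plays the role of AllocAlignmentInteger
static const ExampleTag kExampleMallocTag = 0x5A;

static void* exampleTaggedMalloc(size_t n)
{
    ExampleTag* base = static_cast<ExampleTag*>(malloc(n + sizeof(ExampleTag)));
    if (!base)
        return 0;
    *base = kExampleMallocTag;                  // record which allocator produced this block
    return base + 1;                            // caller sees memory just past the tag
}

static void exampleTaggedFree(void* p)
{
    if (!p)
        return;
    ExampleTag* base = static_cast<ExampleTag*>(p) - 1;
    assert(*base == kExampleMallocTag);         // mismatched allocator => validation failure
    free(base);
}
#endif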
226 void* fastMalloc(size_t n)
228     ASSERT(!isForbidden());
230 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
231     TryMallocReturnValue returnValue = tryFastMalloc(n);
233     returnValue.getValue(result);
235     void* result = malloc(n);
243 TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size)
245     ASSERT(!isForbidden());
247 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
248     size_t totalBytes = n_elements * element_size;
249     if (n_elements > 1 && element_size && (totalBytes / element_size) != n_elements || (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes))
252     totalBytes += sizeof(AllocAlignmentInteger);
253     void* result = malloc(totalBytes);
257     memset(result, 0, totalBytes);
258     *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
259     result = static_cast<AllocAlignmentInteger*>(result) + 1;
262     return calloc(n_elements, element_size);
266 void* fastCalloc(size_t n_elements, size_t element_size)
268     ASSERT(!isForbidden());
270 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
271     TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size);
273     returnValue.getValue(result);
275     void* result = calloc(n_elements, element_size);
283 void fastFree(void* p)
285     ASSERT(!isForbidden());
287 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
291     AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
292     if (*header != Internal::AllocTypeMalloc)
293         Internal::fastMallocMatchFailed(p);
300 TryMallocReturnValue tryFastRealloc(void* p, size_t n)
302     ASSERT(!isForbidden());
304 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
306     if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n)  // If overflow would occur...
308     AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
309     if (*header != Internal::AllocTypeMalloc)
310         Internal::fastMallocMatchFailed(p);
311     void* result = realloc(header, n + sizeof(AllocAlignmentInteger));
315     // This should not be needed because the value is already there:
316     // *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
317     result = static_cast<AllocAlignmentInteger*>(result) + 1;
320     return fastMalloc(n);
323     return realloc(p, n);
327 void* fastRealloc(void* p, size_t n)
329     ASSERT(!isForbidden());
331 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
332     TryMallocReturnValue returnValue = tryFastRealloc(p, n);
334     returnValue.getValue(result);
336     void* result = realloc(p, n);
344 void releaseFastMallocFreeMemory() { }
346 FastMallocStatistics fastMallocStatistics()
348     FastMallocStatistics statistics = { 0, 0, 0, 0 };
355 // This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
356 // It will never be used in this case, so its type and value are less interesting than its presence.
357 extern "C" const int jscore_fastmalloc_introspection = 0;
360 #else // FORCE_SYSTEM_MALLOC
364 #elif HAVE(INTTYPES_H)
365 #include <inttypes.h>
367 #include <sys/types.h>
370 #include "AlwaysInline.h"
371 #include "Assertions.h"
372 #include "TCPackedCache.h"
373 #include "TCPageMap.h"
374 #include "TCSpinLock.h"
375 #include "TCSystemAlloc.h"
388 #ifndef WIN32_LEAN_AND_MEAN
389 #define WIN32_LEAN_AND_MEAN
397 #include "MallocZoneSupport.h"
398 #include <wtf/HashSet.h>
399 #include <wtf/Vector.h>
402 #include <dispatch/dispatch.h>
410 // Calling pthread_getspecific through a global function pointer is faster than a normal
411 // call to the function on Mac OS X, and it's used in performance-critical code. So we
412 // use a function pointer. But that's not necessarily faster on other platforms, and we had
413 // problems with this technique on Windows, so we'll do this only on Mac OS X.
415 static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
416 #define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
419 #define DEFINE_VARIABLE(type, name, value, meaning) \
420 namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
421 type FLAGS_##name(value); \
422 char FLAGS_no##name; \
424 using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
426 #define DEFINE_int64(name, value, meaning) \
427 DEFINE_VARIABLE(int64_t, name, value, meaning)
429 #define DEFINE_double(name, value, meaning) \
430 DEFINE_VARIABLE(double, name, value, meaning)
434 #define malloc fastMalloc
435 #define calloc fastCalloc
436 #define free fastFree
437 #define realloc fastRealloc
439 #define MESSAGE LOG_ERROR
440 #define CHECK_CONDITION ASSERT
444 class TCMalloc_Central_FreeListPadded;
445 class TCMalloc_PageHeap;
446 class TCMalloc_ThreadCache;
447 template <typename T> class PageHeapAllocator;
449 class FastMallocZone {
453     static kern_return_t enumerate(task_t, void*, unsigned typeMmask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
454     static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
455     static boolean_t check(malloc_zone_t*) { return true; }
456     static void print(malloc_zone_t*, boolean_t) { }
457     static void log(malloc_zone_t*, void*) { }
458     static void forceLock(malloc_zone_t*) { }
459     static void forceUnlock(malloc_zone_t*) { }
460     static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }
463     FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
464     static size_t size(malloc_zone_t*, const void*);
465     static void* zoneMalloc(malloc_zone_t*, size_t);
466     static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
467     static void zoneFree(malloc_zone_t*, void*);
468     static void* zoneRealloc(malloc_zone_t*, void*, size_t);
469     static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
470     static void zoneDestroy(malloc_zone_t*) { }
472     malloc_zone_t m_zone;
473     TCMalloc_PageHeap* m_pageHeap;
474     TCMalloc_ThreadCache** m_threadHeaps;
475     TCMalloc_Central_FreeListPadded* m_centralCaches;
476     PageHeapAllocator<Span>* m_spanAllocator;
477     PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
485 // This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
486 // you're porting to a system where you really can't get a stacktrace.
487 #ifdef NO_TCMALLOC_SAMPLES
488 // We use #define so code compiles even if you #include stacktrace.h somehow.
489 # define GetStackTrace(stack, depth, skip) (0)
491 # include <google/stacktrace.h>
495 // Even if we have support for thread-local storage in the compiler
496 // and linker, the OS may not support it. We need to check that at
497 // runtime. Right now, we have to keep a manual set of "bad" OSes.
498 #if defined(HAVE_TLS)
499 static bool kernel_supports_tls = false;      // be conservative
500 static inline bool KernelSupportsTLS() {
501   return kernel_supports_tls;
503 # if !HAVE_DECL_UNAME    // if too old for uname, probably too old for TLS
504 static void CheckIfKernelSupportsTLS() {
505   kernel_supports_tls = false;
508 # include <sys/utsname.h>    // DECL_UNAME checked for <sys/utsname.h> too
509 static void CheckIfKernelSupportsTLS() {
511   if (uname(&buf) != 0) {   // should be impossible
512     MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
513     kernel_supports_tls = false;
514   } else if (strcasecmp(buf.sysname, "linux") == 0) {
515     // The linux case: the first kernel to support TLS was 2.6.0
516     if (buf.release[0] < '2' && buf.release[1] == '.')    // 0.x or 1.x
517       kernel_supports_tls = false;
518     else if (buf.release[0] == '2' && buf.release[1] == '.' &&
519              buf.release[2] >= '0' && buf.release[2] < '6' &&
520              buf.release[3] == '.')                       // 2.0 - 2.5
521       kernel_supports_tls = false;
523       kernel_supports_tls = true;
524   } else {        // some other kernel, we'll be optimistic
525     kernel_supports_tls = true;
527   // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
529 # endif  // HAVE_DECL_UNAME
532 // __THROW is defined in glibc systems. It means, counter-intuitively,
533 // "This function will never throw an exception." It's an optional
534 // optimization tool, but we may need to use it to match glibc prototypes.
535 #ifndef __THROW // I guess we're not on a glibc system
536 # define __THROW // __THROW is just an optimization, so ok to make it ""
539 //-------------------------------------------------------------------
541 //-------------------------------------------------------------------
543 // Not all possible combinations of the following parameters make
544 // sense. In particular, if kMaxSize increases, you may have to
545 // increase kNumClasses as well.
546 static const size_t kPageShift  = 12;
547 static const size_t kPageSize   = 1 << kPageShift;
548 static const size_t kMaxSize    = 8u * kPageSize;
549 static const size_t kAlignShift = 3;
550 static const size_t kAlignment  = 1 << kAlignShift;
551 static const size_t kNumClasses = 68;
553 // Allocates a big block of memory for the pagemap once we reach more than
555 static const size_t kPageMapBigAllocationThreshold = 128 << 20;
557 // Minimum number of pages to fetch from system at a time.  Must be
558 // significantly bigger than kPageSize to amortize system-call
559 // overhead, and also to reduce external fragmentation.  Also, we
560 // should keep this value big because various incarnations of Linux
561 // have small limits on the number of mmap() regions per
563 static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
565 // Number of objects to move between a per-thread list and a central
566 // list in one shot.  We want this to be not too small so we can
567 // amortize the lock overhead for accessing the central list.  Making
568 // it too big may temporarily cause unnecessary memory wastage in the
569 // per-thread free list until the scavenger cleans up the list.
570 static int num_objects_to_move[kNumClasses];
572 // Maximum length we allow a per-thread free-list to have before we
573 // move objects from it into the corresponding central free-list.  We
574 // want this big to avoid locking the central free-list too often.  It
575 // should not hurt to make this list somewhat big because the
576 // scavenging code will shrink it down when its contents are not in use.
577 static const int kMaxFreeListLength = 256;
579 // Lower and upper bounds on the per-thread cache sizes
580 static const size_t kMinThreadCacheSize = kMaxSize * 2;
581 static const size_t kMaxThreadCacheSize = 512 * 1024;
583 // Default bound on the total amount of thread caches
584 static const size_t kDefaultOverallThreadCacheSize = 16 << 20;
586 // For all span-lengths < kMaxPages we keep an exact-size list.
587 // REQUIRED: kMaxPages >= kMinSystemAlloc;
588 static const size_t kMaxPages = kMinSystemAlloc;
590 /* The smallest prime > 2^n */
591 static int primes_list[] = {
592     // Small values might cause high rates of sampling
593     // and hence commented out.
594     // 2, 5, 11, 17, 37, 67, 131, 257,
595     // 521, 1031, 2053, 4099, 8209, 16411,
596     32771, 65537, 131101, 262147, 524309, 1048583,
597     2097169, 4194319, 8388617, 16777259, 33554467 };
599 // Twice the approximate gap between sampling actions.
600 // I.e., we take one sample approximately once every
601 // tcmalloc_sample_parameter/2
602 // bytes of allocation, i.e., ~ once every 128KB.
603 // Must be a prime number.
604 #ifdef NO_TCMALLOC_SAMPLES
605 DEFINE_int64(tcmalloc_sample_parameter, 0,
606              "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
607 static size_t sample_period = 0;
609 DEFINE_int64(tcmalloc_sample_parameter, 262147,
610         "Twice the approximate gap between sampling actions."
611         " Must be a prime number. Otherwise will be rounded up to a "
612         " larger prime number");
613 static size_t sample_period = 262147;
616 // Protects sample_period above
617 static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
619 // Parameters for controlling how fast memory is returned to the OS.
621 DEFINE_double(tcmalloc_release_rate, 1,
622               "Rate at which we release unused memory to the system.  "
623               "Zero means we never release memory back to the system.  "
624               "Increase this flag to return memory faster; decrease it "
625               "to return memory slower.  Reasonable rates are in the "
628 //-------------------------------------------------------------------
629 // Mapping from size to size_class and vice versa
630 //-------------------------------------------------------------------
632 // Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
633 // array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
634 // So for these larger sizes we have an array indexed by ceil(size/128).
636 // We flatten both logical arrays into one physical array and use
637 // arithmetic to compute an appropriate index. The constants used by
638 // ClassIndex() were selected to make the flattening work.
641 // Size Expression Index
642 // -------------------------------------------------------
646 // 1024 (1024 + 7) / 8 128
647 // 1025 (1025 + 127 + (120<<7)) / 128 129
649 // 32768 (32768 + 127 + (120<<7)) / 128 376
650 static const size_t kMaxSmallSize = 1024;
651 static const int shift_amount[2] = { 3, 7 };  // For divides by 8 or 128
652 static const int add_amount[2] = { 7, 127 + (120 << 7) };
653 static unsigned char class_array[377];
655 // Compute index of the class_array[] entry for a given size
656 static inline int ClassIndex(size_t s) {
657   const int i = (s > kMaxSmallSize);
658   return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
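// Illustrative sketch only (not part of TCMalloc, disabled with #if 0): the
// flattened-index arithmetic above, restated with local copies of the constants
// and the worked values from the comment table checked by assert. All
// "example*" names are hypothetical.
#if 0
#include <cassert>
#include <cstddef>

static int exampleClassIndex(size_t s)
{
    static const size_t exampleMaxSmallSize = 1024;
    static const int exampleShift[2] = { 3, 7 };                // divide by 8 or 128
    static const int exampleAdd[2] = { 7, 127 + (120 << 7) };   // rounding + offset past the small-size region
    const int i = (s > exampleMaxSmallSize);
    return static_cast<int>((s + exampleAdd[i]) >> exampleShift[i]);
}

static void exampleCheckClassIndex()
{
    assert(exampleClassIndex(0) == 0);
    assert(exampleClassIndex(1024) == 128);   // (1024 + 7) / 8
    assert(exampleClassIndex(1025) == 129);   // (1025 + 127 + (120 << 7)) / 128
    assert(exampleClassIndex(32768) == 376);  // last valid index of class_array[377]
}
#endif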
661 // Mapping from size class to max size storable in that class
662 static size_t class_to_size[kNumClasses];
664 // Mapping from size class to number of pages to allocate at a time
665 static size_t class_to_pages[kNumClasses];
667 // TransferCache is used to cache transfers of num_objects_to_move[size_class]
668 // back and forth between thread caches and the central cache for a given size
671   void *head;  // Head of chain of objects.
672   void *tail;  // Tail of chain of objects.
674 // A central cache freelist can have anywhere from 0 to kNumTransferEntries
675 // slots to put link list chains into.  To keep memory usage bounded the total
676 // number of TCEntries across size classes is fixed.  Currently each size
677 // class is initially given one TCEntry which also means that the maximum any
678 // one class can have is kNumClasses.
679 static const int kNumTransferEntries = kNumClasses;
681 // Note: the following only works for "n"s that fit in 32-bits, but
682 // that is fine since we only use it for small sizes.
683 static inline int LgFloor(size_t n) {
685   for (int i = 4; i >= 0; --i) {
686     int shift = (1 << i);
687     size_t x = n >> shift;
697 // Some very basic linked list functions for dealing with using void * as
700 static inline void *SLL_Next(void *t) {
701   return *(reinterpret_cast<void**>(t));
704 static inline void SLL_SetNext(void *t, void *n) {
705   *(reinterpret_cast<void**>(t)) = n;
708 static inline void SLL_Push(void **list, void *element) {
709   SLL_SetNext(element, *list);
713 static inline void *SLL_Pop(void **list) {
714   void *result = *list;
715   *list = SLL_Next(*list);
720 // Remove N elements from a linked list to which head points.  head will be
721 // modified to point to the new head.  start and end will point to the first
722 // and last nodes of the range.  Note that end will point to NULL after this
723 // function is called.
724 static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
732   for (int i = 1; i < N; ++i) {
738   *head = SLL_Next(tmp);
739   // Unlink range from list.
740   SLL_SetNext(tmp, NULL);
743 static inline void SLL_PushRange(void **head, void *start, void *end) {
745   SLL_SetNext(end, *head);
749 static inline size_t SLL_Size(void *head) {
753     head = SLL_Next(head);
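// Illustrative sketch only (not part of TCMalloc, disabled with #if 0): how a
// chain of free objects is threaded through the objects themselves, using the
// first word of each object as the "next" pointer, exactly as the SLL_* helpers
// above do. All "example*" names are hypothetical.
#if 0
#include <cassert>
#include <cstddef>

static void exampleFreeListDemo()
{
    char storage[3][32];                             // three "free objects"
    void* list = 0;                                  // empty list

    for (int i = 0; i < 3; ++i) {
        void* element = storage[i];
        *reinterpret_cast<void**>(element) = list;   // SLL_SetNext(element, *list)
        list = element;                              // SLL_Push(&list, element)
    }

    size_t count = 0;
    for (void* p = list; p; p = *reinterpret_cast<void**>(p))  // the SLL_Size walk
        ++count;
    assert(count == 3);
}
#endif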
758 // Setup helper functions.
760 static ALWAYS_INLINE size_t SizeClass(size_t size) {
761   return class_array[ClassIndex(size)];
764 // Get the byte-size for a specified class
765 static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
766   return class_to_size[cl];
768 static int NumMoveSize(size_t size) {
769   if (size == 0) return 0;
770   // Use approx 64k transfers between thread and central caches.
771   int num = static_cast<int>(64.0 * 1024.0 / size);
772   if (num < 2) num = 2;
773   // Clamp well below kMaxFreeListLength to avoid ping pong between central
774   // and thread caches.
775   if (num > static_cast<int>(0.8 * kMaxFreeListLength))
776     num = static_cast<int>(0.8 * kMaxFreeListLength);
778   // Also, avoid bringing in too many objects into small object free
779   // lists.  There are lots of such lists, and if we allow each one to
780   // fetch too many at a time, we end up having to scavenge too often
781   // (especially when there are lots of threads and each thread gets a
782   // small allowance for its thread cache).
784   // TODO: Make thread cache free list sizes dynamic so that we do not
785   // have to equally divide a fixed resource amongst lots of threads.
786   if (num > 32) num = 32;
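// Illustrative sketch only (not part of TCMalloc, disabled with #if 0): the
// batch-size policy above restated with local constants, plus a few worked
// values. All "example*" names are hypothetical.
#if 0
#include <cassert>
#include <cstddef>

static int exampleNumMoveSize(size_t size)
{
    static const int exampleMaxFreeListLength = 256;
    if (size == 0)
        return 0;
    int num = static_cast<int>(64.0 * 1024.0 / size);           // target ~64KB per transfer
    if (num < 2)
        num = 2;                                                 // always move at least a pair
    if (num > static_cast<int>(0.8 * exampleMaxFreeListLength))
        num = static_cast<int>(0.8 * exampleMaxFreeListLength);  // stay well below the list cap
    if (num > 32)
        num = 32;                                                // bound small-object batches
    return num;
}

static void exampleCheckNumMoveSize()
{
    assert(exampleNumMoveSize(8) == 32);      // tiny objects: capped at 32 per batch
    assert(exampleNumMoveSize(4096) == 16);   // 64KB / 4KB
    assert(exampleNumMoveSize(65536) == 2);   // large objects: floor of 2
}
#endif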
791 // Initialize the mapping arrays
792 static void InitSizeClasses() {
793 // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
794 if (ClassIndex(0) < 0) {
795 MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
798   if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
799     MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
803   // Compute the size classes we want to use
804   size_t sc = 1;   // Next size class to assign
805   unsigned char alignshift = kAlignShift;
807   for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
808     int lg = LgFloor(size);
810       // Increase alignment every so often.
812       // Since we double the alignment every time size doubles and
813       // size >= 128, this means that space wasted due to alignment is
814       // at most 16/128 i.e., 12.5%.  Plus we cap the alignment at 256
815       // bytes, so the space wasted as a percentage starts falling for
817       if ((lg >= 7) && (alignshift < 8)) {
823     // Allocate enough pages so leftover is less than 1/8 of total.
824     // This bounds wasted space to at most 12.5%.
825     size_t psize = kPageSize;
826     while ((psize % size) > (psize >> 3)) {
829     const size_t my_pages = psize >> kPageShift;
831     if (sc > 1 && my_pages == class_to_pages[sc-1]) {
832       // See if we can merge this into the previous class without
833       // increasing the fragmentation of the previous class.
834       const size_t my_objects = (my_pages << kPageShift) / size;
835       const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
836                                   / class_to_size[sc-1];
837       if (my_objects == prev_objects) {
838         // Adjust last class to include this size
839         class_to_size[sc-1] = size;
845     class_to_pages[sc] = my_pages;
846     class_to_size[sc] = size;
849   if (sc != kNumClasses) {
850     MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
851             sc, int(kNumClasses));
855   // Initialize the mapping arrays
857   for (unsigned char c = 1; c < kNumClasses; c++) {
858     const size_t max_size_in_class = class_to_size[c];
859     for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
860       class_array[ClassIndex(s)] = c;
862     next_size = static_cast<int>(max_size_in_class + kAlignment);
865   // Double-check sizes just to be safe
866   for (size_t size = 0; size <= kMaxSize; size++) {
867     const size_t sc = SizeClass(size);
869       MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
872     if (sc > 1 && size <= class_to_size[sc-1]) {
873       MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
877     if (sc >= kNumClasses) {
878       MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
881     const size_t s = class_to_size[sc];
883       MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
887       MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
892   // Initialize the num_objects_to_move array.
893   for (size_t cl = 1; cl < kNumClasses; ++cl) {
894     num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
899   // Dump class sizes and maximum external wastage per size class
900   for (size_t cl = 1; cl < kNumClasses; ++cl) {
901     const int alloc_size = class_to_pages[cl] << kPageShift;
902     const int alloc_objs = alloc_size / class_to_size[cl];
903     const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
904     const int max_waste = alloc_size - min_used;
905     MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
907             int(class_to_size[cl-1] + 1),
908             int(class_to_size[cl]),
909             int(class_to_pages[cl] << kPageShift),
910             max_waste * 100.0 / alloc_size
917 // -------------------------------------------------------------------------
918 // Simple allocator for objects of a specified type. External locking
919 // is required before accessing one of these objects.
920 // -------------------------------------------------------------------------
922 // Metadata allocator -- keeps stats about how many bytes allocated
923 static uint64_t metadata_system_bytes = 0;
924 static void* MetaDataAlloc(size_t bytes) {
925   void* result = TCMalloc_SystemAlloc(bytes, 0);
926   if (result != NULL) {
927     metadata_system_bytes += bytes;
933 class PageHeapAllocator {
935   // How much to allocate from system at a time
936   static const size_t kAllocIncrement = 32 << 10;
939   static const size_t kAlignedSize
940     = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
942   // Free area from which to carve new objects
946   // Linked list of all regions allocated by this allocator
947   void* allocated_regions_;
949   // Free list of already carved objects
952   // Number of allocated but unfreed objects
957     ASSERT(kAlignedSize <= kAllocIncrement);
959     allocated_regions_ = 0;
968     if (free_list_ != NULL) {
970       free_list_ = *(reinterpret_cast<void**>(result));
972       if (free_avail_ < kAlignedSize) {
974         char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
978         *(void**)new_allocation = allocated_regions_;
979         allocated_regions_ = new_allocation;
980         free_area_ = new_allocation + kAlignedSize;
981         free_avail_ = kAllocIncrement - kAlignedSize;
984       free_area_ += kAlignedSize;
985       free_avail_ -= kAlignedSize;
988     return reinterpret_cast<T*>(result);
992     *(reinterpret_cast<void**>(p)) = free_list_;
997   int inuse() const { return inuse_; }
999 #if defined(WTF_CHANGES) && OS(DARWIN)
1000     template <class Recorder>
1001     void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
1003         vm_address_t adminAllocation = reinterpret_cast<vm_address_t>(allocated_regions_);
1004         while (adminAllocation) {
1005             recorder.recordRegion(adminAllocation, kAllocIncrement);
1006             adminAllocation = *reader(reinterpret_cast<vm_address_t*>(adminAllocation));
1012 // -------------------------------------------------------------------------
1013 // Span - a contiguous run of pages
1014 // -------------------------------------------------------------------------
1016 // Type that can hold a page number
1017 typedef uintptr_t PageID;
1019 // Type that can hold the length of a run of pages
1020 typedef uintptr_t Length;
1022 static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
1024 // Convert byte size into pages.  This won't overflow, but may return
1025 // an unreasonably large value if bytes is huge enough.
1026 static inline Length pages(size_t bytes) {
1027   return (bytes >> kPageShift) +
1028       ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
1031 // Convert a user size into the number of bytes that will actually be
1033 static size_t AllocationSize(size_t bytes) {
1034   if (bytes > kMaxSize) {
1035     // Large object: we allocate an integral number of pages
1036     ASSERT(bytes <= (kMaxValidPages << kPageShift));
1037     return pages(bytes) << kPageShift;
1039   // Small object: find the size class to which it belongs
1040   return ByteSizeForClass(SizeClass(bytes));
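// Illustrative sketch only (not part of TCMalloc, disabled with #if 0): the
// page-rounding arithmetic of pages() above, with kPageShift = 12 (4KB pages)
// hard-coded locally and checked on a few values. All "example*" names are
// hypothetical.
#if 0
#include <cassert>
#include <cstddef>

static size_t examplePages(size_t bytes)
{
    static const size_t examplePageShift = 12;
    static const size_t examplePageSize = 1 << examplePageShift;
    // Whole pages, plus one more if any non-empty tail remains.
    return (bytes >> examplePageShift) + ((bytes & (examplePageSize - 1)) > 0 ? 1 : 0);
}

static void exampleCheckPages()
{
    assert(examplePages(0) == 0);
    assert(examplePages(1) == 1);        // any non-empty tail occupies a whole page
    assert(examplePages(4096) == 1);
    assert(examplePages(4097) == 2);
    assert(examplePages(40960) == 10);   // a 40KB request is 10 pages
}
#endif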
1044 // Information kept for a span (a contiguous run of pages).
1046   PageID        start;          // Starting page number
1047   Length        length;         // Number of pages in span
1048   Span*         next;           // Used when in link list
1049   Span*         prev;           // Used when in link list
1050   void*         objects;        // Linked list of free objects
1051   unsigned int  free : 1;       // Is the span free
1052 #ifndef NO_TCMALLOC_SAMPLES
1053   unsigned int  sample : 1;     // Sampled object?
1055   unsigned int  sizeclass : 8;  // Size-class for small objects (or 0)
1056   unsigned int  refcount : 11;  // Number of non-free objects
1057   bool decommitted : 1;
1061 // For debugging, we can keep a log of events per span
1068 #define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)
1071 void Event(Span* span, char op, int v = 0) {
1072   span->history[span->nexthistory] = op;
1073   span->value[span->nexthistory] = v;
1074   span->nexthistory++;
1075   if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
1078 #define Event(s,o,v) ((void) 0)
1081 // Allocator/deallocator for spans
1082 static PageHeapAllocator<Span> span_allocator;
1083 static Span* NewSpan(PageID p, Length len) {
1084   Span* result = span_allocator.New();
1085   memset(result, 0, sizeof(*result));
1087   result->length = len;
1089   result->nexthistory = 0;
1094 static inline void DeleteSpan(Span* span) {
1096   // In debug mode, trash the contents of deleted Spans
1097   memset(span, 0x3f, sizeof(*span));
1099   span_allocator.Delete(span);
1102 // -------------------------------------------------------------------------
1103 // Doubly linked list of spans.
1104 // -------------------------------------------------------------------------
1106 static inline void DLL_Init(Span* list) {
1111 static inline void DLL_Remove(Span* span) {
1112   span->prev->next = span->next;
1113   span->next->prev = span->prev;
1118 static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
1119   return list->next == list;
1122 static int DLL_Length(const Span* list) {
1124   for (Span* s = list->next; s != list; s = s->next) {
1130 #if 0 /* Not needed at the moment -- causes compiler warnings if not used */
1131 static void DLL_Print(const char* label, const Span* list) {
1132   MESSAGE("%-10s %p:", label, list);
1133   for (const Span* s = list->next; s != list; s = s->next) {
1134     MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
1140 static inline void DLL_Prepend(Span* list, Span* span) {
1141   ASSERT(span->next == NULL);
1142   ASSERT(span->prev == NULL);
1143   span->next = list->next;
1145   list->next->prev = span;
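// Illustrative sketch only (not part of TCMalloc, disabled with #if 0): the
// circular doubly linked list with a sentinel node that the DLL_* helpers above
// implement for Span, reduced to a tiny self-contained node type. All
// "example*"/"ExampleNode" names are hypothetical.
#if 0
#include <cassert>

struct ExampleNode {
    ExampleNode* next;
    ExampleNode* prev;
};

static void exampleDllInit(ExampleNode* list) { list->next = list; list->prev = list; }
static bool exampleDllIsEmpty(const ExampleNode* list) { return list->next == list; }

static void exampleDllPrepend(ExampleNode* list, ExampleNode* node)
{
    // Same pointer shuffle as DLL_Prepend: node goes right after the sentinel.
    node->next = list->next;
    node->prev = list;
    list->next->prev = node;
    list->next = node;
}

static void exampleDllRemove(ExampleNode* node)
{
    node->prev->next = node->next;
    node->next->prev = node->prev;
    node->prev = 0;
    node->next = 0;
}

static void exampleDllDemo()
{
    ExampleNode sentinel, a, b;
    exampleDllInit(&sentinel);
    assert(exampleDllIsEmpty(&sentinel));
    exampleDllPrepend(&sentinel, &a);
    exampleDllPrepend(&sentinel, &b);      // b is now first, a second
    assert(sentinel.next == &b && b.next == &a && a.next == &sentinel);
    exampleDllRemove(&b);
    assert(sentinel.next == &a);
}
#endif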
1149 // -------------------------------------------------------------------------
1150 // Stack traces kept for sampled allocations
1151 // The following state is protected by pageheap_lock_.
1152 // -------------------------------------------------------------------------
1154 // size/depth are made the same size as a pointer so that some generic
1155 // code below can conveniently cast them back and forth to void*.
1156 static const int kMaxStackDepth = 31;
1158   uintptr_t size;          // Size of object
1159   uintptr_t depth;         // Number of PC values stored in array below
1160   void* stack[kMaxStackDepth];
1162 static PageHeapAllocator<StackTrace> stacktrace_allocator;
1163 static Span sampled_objects;
1165 // -------------------------------------------------------------------------
1166 // Map from page-id to per-page data
1167 // -------------------------------------------------------------------------
1169 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
1170 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
1171 // because sometimes the sizeclass is all the information we need.
1173 // Selector class -- general selector uses 3-level map
1174 template <int BITS> class MapSelector {
1176   typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
1177   typedef PackedCache<BITS, uint64_t> CacheType;
1180 #if defined(WTF_CHANGES)
1182 // On all known X86-64 platforms, the upper 16 bits are always unused and therefore
1183 // can be excluded from the PageMap key.
1184 // See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
1186 static const size_t kBitsUnusedOn64Bit = 16;
1188 static const size_t kBitsUnusedOn64Bit = 0;
1191 // A three-level map for 64-bit machines
1192 template <> class MapSelector<64> {
1194   typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
1195   typedef PackedCache<64, uint64_t> CacheType;
1199 // A two-level map for 32-bit machines
1200 template <> class MapSelector<32> {
1202   typedef TCMalloc_PageMap2<32 - kPageShift> Type;
1203   typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
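// Illustrative sketch only (not part of TCMalloc, disabled with #if 0): the
// key-width arithmetic behind the MapSelector choices above, for kPageShift = 12.
// The negative-size-array typedefs act as pre-C++11 compile-time checks; all
// "example*" names are hypothetical.
#if 0
static const int exampleKPageShift = 12;
static const int exampleBitsUnusedOn64Bit = 16;  // x86-64 canonical-address assumption above

// 64-bit: 64 - 12 - 16 = 36 bits of page number => a three-level radix map.
static const int examplePageMapBits64 = 64 - exampleKPageShift - exampleBitsUnusedOn64Bit;
// 32-bit: 32 - 12 = 20 bits of page number => a two-level map is enough.
static const int examplePageMapBits32 = 32 - exampleKPageShift;

typedef int exampleStaticCheck64[examplePageMapBits64 == 36 ? 1 : -1];
typedef int exampleStaticCheck32[examplePageMapBits32 == 20 ? 1 : -1];
#endif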
1206 // -------------------------------------------------------------------------
1207 // Page-level allocator
1208 // * Eager coalescing
1210 // Heap for page-level allocation. We allow allocating and freeing a
1211 // contiguous runs of pages (called a "span").
1212 // -------------------------------------------------------------------------
1214 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1215 // The central page heap collects spans of memory that have been deleted but are still committed until they are released
1216 // back to the system. We use a background thread to periodically scan the list of free spans and release some back to the
1217 // system. Every 5 seconds, the background thread wakes up and does the following:
1218 // - Check if we needed to commit memory in the last 5 seconds. If so, skip this scavenge because it's a sign that we are short
1219 // of free committed pages and so we should not release them back to the system yet.
1220 // - Otherwise, go through the list of free spans (from largest to smallest) and release up to a fraction of the free committed pages
1221 // back to the system.
1222 // - If the number of free committed pages reaches kMinimumFreeCommittedPageCount, we can stop the scavenging and block the
1223 // scavenging thread until the number of free committed pages goes above kMinimumFreeCommittedPageCount.
1225 // Background thread wakes up every 5 seconds to scavenge as long as there is memory available to return to the system.
1226 static const int kScavengeTimerDelayInSeconds = 5;
1228 // Number of free committed pages that we want to keep around.
1229 static const size_t kMinimumFreeCommittedPageCount = 512;
1231 // During a scavenge, we'll release up to a fraction of the free committed pages.
1233 // We are slightly less aggressive in releasing memory on Windows due to performance reasons.
1234 static const int kMaxScavengeAmountFactor = 3;
1236 static const int kMaxScavengeAmountFactor = 2;
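// Illustrative sketch only (not part of TCMalloc, disabled with #if 0): an
// approximation of the per-pass release budget implied by the constants above.
// A scavenge stops once either a 1/kMaxScavengeAmountFactor fraction of the
// committed free pages has been released or the committed free pool has shrunk
// to the minimum we want to keep. All "example*" names are hypothetical.
#if 0
#include <cassert>
#include <cstddef>

static size_t exampleScavengeBudget(size_t freeCommittedPages)
{
    static const size_t exampleMinimumFreeCommittedPageCount = 512;
    static const size_t exampleMaxScavengeAmountFactor = 2;   // the platform #if above picks 3 or 2
    if (freeCommittedPages <= exampleMinimumFreeCommittedPageCount)
        return 0;                                              // nothing worth giving back yet
    size_t budget = freeCommittedPages / exampleMaxScavengeAmountFactor;
    size_t excess = freeCommittedPages - exampleMinimumFreeCommittedPageCount;
    return budget < excess ? budget : excess;                  // never dip below the floor
}

static void exampleCheckScavengeBudget()
{
    assert(exampleScavengeBudget(256) == 0);      // below the floor: keep everything
    assert(exampleScavengeBudget(2048) == 1024);  // release up to half of what is free
}
#endif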
1240 class TCMalloc_PageHeap
{
1244 // Allocate a run of "n" pages. Returns zero if out of memory.
1245 Span
* New(Length n
);
1247 // Delete the span "[p, p+n-1]".
1248 // REQUIRES: span was returned by earlier call to New() and
1249 // has not yet been deleted.
1250 void Delete(Span
* span
);
1252 // Mark an allocated span as being used for small objects of the
1253 // specified size-class.
1254 // REQUIRES: span was returned by an earlier call to New()
1255 // and has not yet been deleted.
1256 void RegisterSizeClass(Span
* span
, size_t sc
);
1258 // Split an allocated span into two spans: one of length "n" pages
1259 // followed by another span of length "span->length - n" pages.
1260 // Modifies "*span" to point to the first span of length "n" pages.
1261 // Returns a pointer to the second span.
1263 // REQUIRES: "0 < n < span->length"
1264 // REQUIRES: !span->free
1265 // REQUIRES: span->sizeclass == 0
1266 Span
* Split(Span
* span
, Length n
);
1268 // Return the descriptor for the specified page.
1269 inline Span
* GetDescriptor(PageID p
) const {
1270 return reinterpret_cast<Span
*>(pagemap_
.get(p
));
1274 inline Span
* GetDescriptorEnsureSafe(PageID p
)
1276 pagemap_
.Ensure(p
, 1);
1277 return GetDescriptor(p
);
1280 size_t ReturnedBytes() const;
1283 // Dump state to stderr
1285 void Dump(TCMalloc_Printer
* out
);
1288 // Return number of bytes allocated from system
1289 inline uint64_t SystemBytes() const { return system_bytes_
; }
1291 // Return number of free bytes in heap
1292 uint64_t FreeBytes() const {
1293 return (static_cast<uint64_t>(free_pages_
) << kPageShift
);
1297 bool CheckList(Span
* list
, Length min_pages
, Length max_pages
);
1299 // Release all pages on the free list for reuse by the OS:
1300 void ReleaseFreePages();
1302 // Return 0 if we have no information, or else the correct sizeclass for p.
1303 // Reads and writes to pagemap_cache_ do not require locking.
1304 // The entries are 64 bits on 64-bit hardware and 16 bits on
1305 // 32-bit hardware, and we don't mind raciness as long as each read of
1306 // an entry yields a valid entry, not a partially updated entry.
1307   size_t GetSizeClassIfCached(PageID p) const {
1308     return pagemap_cache_.GetOrDefault(p, 0);
1310   void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
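  // Illustrative caller-side sketch (not actual call sites; "heap" is a made-up
  // variable name): a return value of 0 from GetSizeClassIfCached() means
  // "not cached / unknown", never "size class 0", so callers fall back to the
  // full pagemap lookup:
  //
  //   const size_t cl = heap->GetSizeClassIfCached(p);
  //   if (cl != 0) {
  //       // trust the cached size class
  //   } else {
  //       // consult GetDescriptor(p) / pagemap_ under the usual locking rules
  //   }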
1313 // Pick the appropriate map and cache types based on pointer size
1314 typedef MapSelector
<8*sizeof(uintptr_t)>::Type PageMap
;
1315 typedef MapSelector
<8*sizeof(uintptr_t)>::CacheType PageMapCache
;
1317 mutable PageMapCache pagemap_cache_
;
1319 // We segregate spans of a given size into two circular linked
1320 // lists: one for normal spans, and one for spans whose memory
1321 // has been returned to the system.
1327 // List of free spans of length >= kMaxPages
1330 // Array mapping from span length to a doubly linked list of free spans
1331 SpanList free_
[kMaxPages
];
1333 // Number of pages kept in free lists
1334 uintptr_t free_pages_
;
1336 // Bytes allocated from system
1337 uint64_t system_bytes_
;
1339 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1340 // Number of pages kept in free lists that are still committed.
1341 Length free_committed_pages_
;
1343 // Number of pages that we committed in the last scavenge wait interval.
1344 Length pages_committed_since_last_scavenge_
;
1347 bool GrowHeap(Length n
);
1349 // REQUIRES span->length >= n
1350 // Remove span from its free list, and move any leftover part of
1351 // span into appropriate free lists. Also update "span" to have
1352 // length exactly "n" and mark it as non-free so it can be returned
1355 // "released" is true iff "span" was found on a "returned" list.
1356 void Carve(Span
* span
, Length n
, bool released
);
1358 void RecordSpan(Span
* span
) {
1359 pagemap_
.set(span
->start
, span
);
1360 if (span
->length
> 1) {
1361 pagemap_
.set(span
->start
+ span
->length
- 1, span
);
1365 // Allocate a large span of length == n. If successful, returns a
1366 // span of exactly the specified length. Else, returns NULL.
1367 Span
* AllocLarge(Length n
);
1369 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1370 // Incrementally release some memory to the system.
1371 // IncrementalScavenge(n) is called whenever n pages are freed.
1372 void IncrementalScavenge(Length n
);
1375 // Number of pages to deallocate before doing more scavenging
1376 int64_t scavenge_counter_
;
1378 // Index of last free list we scavenged
1379 size_t scavenge_index_
;
1381 #if defined(WTF_CHANGES) && OS(DARWIN)
1382 friend class FastMallocZone
;
1385 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1386 void initializeScavenger();
1387 ALWAYS_INLINE
void signalScavenger();
1389 ALWAYS_INLINE
bool shouldContinueScavenging() const;
1391 #if !HAVE(DISPATCH_H)
1392 static NO_RETURN
void* runScavengerThread(void*);
1393 NO_RETURN
void scavengerThread();
1395 // Keeps track of whether the background thread is actively scavenging memory every kScavengeTimerDelayInSeconds, or
1396 // it's blocked waiting for more pages to be deleted.
1397 bool m_scavengeThreadActive
;
1399 pthread_mutex_t m_scavengeMutex
;
1400 pthread_cond_t m_scavengeCondition
;
1401 #else // !HAVE(DISPATCH_H)
1402 void periodicScavenge();
1404 dispatch_queue_t m_scavengeQueue
;
1405 dispatch_source_t m_scavengeTimer
;
1406 bool m_scavengingScheduled
;
1409 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1412 void TCMalloc_PageHeap::init()
1414   pagemap_.init(MetaDataAlloc);
1415   pagemap_cache_ = PageMapCache(0);
1419 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1420   free_committed_pages_ = 0;
1421   pages_committed_since_last_scavenge_ = 0;
1422 #endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1424   scavenge_counter_ = 0;
1425   // Start scavenging at kMaxPages list
1426   scavenge_index_ = kMaxPages-1;
1427   COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
1428   DLL_Init(&large_.normal);
1429   DLL_Init(&large_.returned);
1430   for (size_t i = 0; i < kMaxPages; i++) {
1431     DLL_Init(&free_[i].normal);
1432     DLL_Init(&free_[i].returned);
1435 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1436   initializeScavenger();
1437 #endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1440 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1442 #if !HAVE(DISPATCH_H)
1444 void TCMalloc_PageHeap::initializeScavenger()
1446 pthread_mutex_init(&m_scavengeMutex
, 0);
1447 pthread_cond_init(&m_scavengeCondition
, 0);
1448 m_scavengeThreadActive
= true;
1450 pthread_create(&thread
, 0, runScavengerThread
, this);
1453 void* TCMalloc_PageHeap::runScavengerThread(void* context
)
1455 static_cast<TCMalloc_PageHeap
*>(context
)->scavengerThread();
1457 // Without this, Visual Studio will complain that this method does not return a value.
1462 ALWAYS_INLINE
void TCMalloc_PageHeap::signalScavenger()
1464 if (!m_scavengeThreadActive
&& shouldContinueScavenging())
1465 pthread_cond_signal(&m_scavengeCondition
);
1468 #else // !HAVE(DISPATCH_H)
1470 void TCMalloc_PageHeap::initializeScavenger()
1472 m_scavengeQueue
= dispatch_queue_create("com.apple.JavaScriptCore.FastMallocSavenger", NULL
);
1473 m_scavengeTimer
= dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER
, 0, 0, m_scavengeQueue
);
1474 dispatch_time_t startTime
= dispatch_time(DISPATCH_TIME_NOW
, kScavengeTimerDelayInSeconds
* NSEC_PER_SEC
);
1475 dispatch_source_set_timer(m_scavengeTimer
, startTime
, kScavengeTimerDelayInSeconds
* NSEC_PER_SEC
, 1000 * NSEC_PER_USEC
);
1476 dispatch_source_set_event_handler(m_scavengeTimer
, ^{ periodicScavenge(); });
1477 m_scavengingScheduled
= false;
1480 ALWAYS_INLINE
void TCMalloc_PageHeap::signalScavenger()
1482 if (!m_scavengingScheduled
&& shouldContinueScavenging()) {
1483 m_scavengingScheduled
= true;
1484 dispatch_resume(m_scavengeTimer
);
1490 void TCMalloc_PageHeap::scavenge()
1492 // If we have to commit memory in the last 5 seconds, it means we don't have enough free committed pages
1493 // for the amount of allocations that we do. So hold off on releasing memory back to the system.
1494 if (pages_committed_since_last_scavenge_
> 0) {
1495 pages_committed_since_last_scavenge_
= 0;
1498 Length pagesDecommitted
= 0;
1499 for (int i
= kMaxPages
; i
>= 0; i
--) {
1500 SpanList
* slist
= (static_cast<size_t>(i
) == kMaxPages
) ? &large_
: &free_
[i
];
1501 if (!DLL_IsEmpty(&slist
->normal
)) {
1502 // Release the last span on the normal portion of this list
1503 Span
* s
= slist
->normal
.prev
;
1504 // Only decommit up to a fraction of the free committed pages if pages_allocated_since_last_scavenge_ > 0.
1505 if ((pagesDecommitted
+ s
->length
) * kMaxScavengeAmountFactor
> free_committed_pages_
)
1508 TCMalloc_SystemRelease(reinterpret_cast<void*>(s
->start
<< kPageShift
),
1509 static_cast<size_t>(s
->length
<< kPageShift
));
1510 if (!s
->decommitted
) {
1511 pagesDecommitted
+= s
->length
;
1512 s
->decommitted
= true;
1514 DLL_Prepend(&slist
->returned
, s
);
1515 // We can stop scavenging if the number of free committed pages left is less than or equal to the minimum number we want to keep around.
1516 if (free_committed_pages_
<= kMinimumFreeCommittedPageCount
+ pagesDecommitted
)
1520 pages_committed_since_last_scavenge_
= 0;
1521 ASSERT(free_committed_pages_
>= pagesDecommitted
);
1522 free_committed_pages_
-= pagesDecommitted
;
1525 ALWAYS_INLINE
bool TCMalloc_PageHeap::shouldContinueScavenging() const
1527 return free_committed_pages_
> kMinimumFreeCommittedPageCount
;
1530 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1532 inline Span
* TCMalloc_PageHeap::New(Length n
) {
1536 // Find first size >= n that has a non-empty list
1537 for (Length s
= n
; s
< kMaxPages
; s
++) {
1539 bool released
= false;
1540 if (!DLL_IsEmpty(&free_
[s
].normal
)) {
1541 // Found normal span
1542 ll
= &free_
[s
].normal
;
1543 } else if (!DLL_IsEmpty(&free_
[s
].returned
)) {
1544 // Found returned span; reallocate it
1545 ll
= &free_
[s
].returned
;
1548 // Keep looking in larger classes
1552 Span
* result
= ll
->next
;
1553 Carve(result
, n
, released
);
1554 if (result
->decommitted
) {
1555 TCMalloc_SystemCommit(reinterpret_cast<void*>(result
->start
<< kPageShift
), static_cast<size_t>(n
<< kPageShift
));
1556 result
->decommitted
= false;
1557 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1558 pages_committed_since_last_scavenge_
+= n
;
1561 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1563 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
1564 // free committed pages count.
1565 ASSERT(free_committed_pages_
>= n
);
1566 free_committed_pages_
-= n
;
1568 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1574 Span
* result
= AllocLarge(n
);
1575 if (result
!= NULL
) {
1576 ASSERT_SPAN_COMMITTED(result
);
1580 // Grow the heap and try again
1586 return AllocLarge(n
);
1589 Span
* TCMalloc_PageHeap::AllocLarge(Length n
) {
1590 // find the best span (closest to n in size).
1591 // The following loops implements address-ordered best-fit.
1592 bool from_released
= false;
1595 // Search through normal list
1596 for (Span
* span
= large_
.normal
.next
;
1597 span
!= &large_
.normal
;
1598 span
= span
->next
) {
1599 if (span
->length
>= n
) {
1601 || (span
->length
< best
->length
)
1602 || ((span
->length
== best
->length
) && (span
->start
< best
->start
))) {
1604 from_released
= false;
1609 // Search through released list in case it has a better fit
1610 for (Span
* span
= large_
.returned
.next
;
1611 span
!= &large_
.returned
;
1612 span
= span
->next
) {
1613 if (span
->length
>= n
) {
1615 || (span
->length
< best
->length
)
1616 || ((span
->length
== best
->length
) && (span
->start
< best
->start
))) {
1618 from_released
= true;
1624 Carve(best
, n
, from_released
);
1625 if (best
->decommitted
) {
1626 TCMalloc_SystemCommit(reinterpret_cast<void*>(best
->start
<< kPageShift
), static_cast<size_t>(n
<< kPageShift
));
1627 best
->decommitted
= false;
1628 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1629 pages_committed_since_last_scavenge_
+= n
;
1632 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1634 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
1635 // free committed pages count.
1636 ASSERT(free_committed_pages_
>= n
);
1637 free_committed_pages_
-= n
;
1639 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1647 Span
* TCMalloc_PageHeap::Split(Span
* span
, Length n
) {
1649 ASSERT(n
< span
->length
);
1650 ASSERT(!span
->free
);
1651 ASSERT(span
->sizeclass
== 0);
1652 Event(span
, 'T', n
);
1654 const Length extra
= span
->length
- n
;
1655 Span
* leftover
= NewSpan(span
->start
+ n
, extra
);
1656 Event(leftover
, 'U', extra
);
1657 RecordSpan(leftover
);
1658 pagemap_
.set(span
->start
+ n
- 1, span
); // Update map from pageid to span
1664 static ALWAYS_INLINE void propagateDecommittedState(Span* destination, Span* source)
1666     destination->decommitted = source->decommitted;
1669 inline void TCMalloc_PageHeap::Carve(Span
* span
, Length n
, bool released
) {
1673 Event(span
, 'A', n
);
1675 const int extra
= static_cast<int>(span
->length
- n
);
1678 Span
* leftover
= NewSpan(span
->start
+ n
, extra
);
1680 propagateDecommittedState(leftover
, span
);
1681 Event(leftover
, 'S', extra
);
1682 RecordSpan(leftover
);
1684 // Place leftover span on appropriate free list
1685 SpanList
* listpair
= (static_cast<size_t>(extra
) < kMaxPages
) ? &free_
[extra
] : &large_
;
1686 Span
* dst
= released
? &listpair
->returned
: &listpair
->normal
;
1687 DLL_Prepend(dst
, leftover
);
1690 pagemap_
.set(span
->start
+ n
- 1, span
);
1694 static ALWAYS_INLINE
void mergeDecommittedStates(Span
* destination
, Span
* other
)
1696 if (destination
->decommitted
&& !other
->decommitted
) {
1697 TCMalloc_SystemRelease(reinterpret_cast<void*>(other
->start
<< kPageShift
),
1698 static_cast<size_t>(other
->length
<< kPageShift
));
1699 } else if (other
->decommitted
&& !destination
->decommitted
) {
1700 TCMalloc_SystemRelease(reinterpret_cast<void*>(destination
->start
<< kPageShift
),
1701 static_cast<size_t>(destination
->length
<< kPageShift
));
1702 destination
->decommitted
= true;
1706 inline void TCMalloc_PageHeap::Delete(Span
* span
) {
1708 ASSERT(!span
->free
);
1709 ASSERT(span
->length
> 0);
1710 ASSERT(GetDescriptor(span
->start
) == span
);
1711 ASSERT(GetDescriptor(span
->start
+ span
->length
- 1) == span
);
1712 span
->sizeclass
= 0;
1713 #ifndef NO_TCMALLOC_SAMPLES
1717 // Coalesce -- we guarantee that "p" != 0, so no bounds checking
1718 // necessary. We do not bother resetting the stale pagemap
1719 // entries for the pieces we are merging together because we only
1720 // care about the pagemap entries for the boundaries.
1721 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1722 // Track the total size of the neighboring free spans that are committed.
1723 Length neighboringCommittedSpansLength
= 0;
1725 const PageID p
= span
->start
;
1726 const Length n
= span
->length
;
1727 Span
* prev
= GetDescriptor(p
-1);
1728 if (prev
!= NULL
&& prev
->free
) {
1729 // Merge preceding span into this span
1730 ASSERT(prev
->start
+ prev
->length
== p
);
1731 const Length len
= prev
->length
;
1732 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1733 if (!prev
->decommitted
)
1734 neighboringCommittedSpansLength
+= len
;
1736 mergeDecommittedStates(span
, prev
);
1740 span
->length
+= len
;
1741 pagemap_
.set(span
->start
, span
);
1742 Event(span
, 'L', len
);
1744 Span
* next
= GetDescriptor(p
+n
);
1745 if (next
!= NULL
&& next
->free
) {
1746 // Merge next span into this span
1747 ASSERT(next
->start
== p
+n
);
1748 const Length len
= next
->length
;
1749 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1750 if (!next
->decommitted
)
1751 neighboringCommittedSpansLength
+= len
;
1753 mergeDecommittedStates(span
, next
);
1756 span
->length
+= len
;
1757 pagemap_
.set(span
->start
+ span
->length
- 1, span
);
1758 Event(span
, 'R', len
);
1761 Event(span
, 'D', span
->length
);
1763 if (span
->decommitted
) {
1764 if (span
->length
< kMaxPages
)
1765 DLL_Prepend(&free_
[span
->length
].returned
, span
);
1767 DLL_Prepend(&large_
.returned
, span
);
1769 if (span
->length
< kMaxPages
)
1770 DLL_Prepend(&free_
[span
->length
].normal
, span
);
1772 DLL_Prepend(&large_
.normal
, span
);
1776 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1777 if (span
->decommitted
) {
1778 // If the merged span is decommitted, that means we decommitted any neighboring spans that were
1779 // committed. Update the free committed pages count.
1780 free_committed_pages_
-= neighboringCommittedSpansLength
;
1782 // If the merged span remains committed, add the deleted span's size to the free committed pages count.
1783 free_committed_pages_
+= n
;
1786 // Make sure the scavenge thread becomes active if we have enough freed pages to release some back to the system.
1789 IncrementalScavenge(n
);
1795 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1796 void TCMalloc_PageHeap::IncrementalScavenge(Length n
) {
1797 // Fast path; not yet time to release memory
1798 scavenge_counter_
-= n
;
1799 if (scavenge_counter_
>= 0) return; // Not yet time to scavenge
1801 static const size_t kDefaultReleaseDelay
= 64;
1803 // Find index of free list to scavenge
1804 size_t index
= scavenge_index_
+ 1;
1805 for (size_t i
= 0; i
< kMaxPages
+1; i
++) {
1806 if (index
> kMaxPages
) index
= 0;
1807 SpanList
* slist
= (index
== kMaxPages
) ? &large_
: &free_
[index
];
1808 if (!DLL_IsEmpty(&slist
->normal
)) {
1809 // Release the last span on the normal portion of this list
1810 Span
* s
= slist
->normal
.prev
;
1812 TCMalloc_SystemRelease(reinterpret_cast<void*>(s
->start
<< kPageShift
),
1813 static_cast<size_t>(s
->length
<< kPageShift
));
1814 s
->decommitted
= true;
1815 DLL_Prepend(&slist
->returned
, s
);
1817 scavenge_counter_
= std::max
<size_t>(16UL, std::min
<size_t>(kDefaultReleaseDelay
, kDefaultReleaseDelay
- (free_pages_
/ kDefaultReleaseDelay
)));
1819 if (index
== kMaxPages
&& !DLL_IsEmpty(&slist
->normal
))
1820 scavenge_index_
= index
- 1;
1822 scavenge_index_
= index
;
1828 // Nothing to scavenge, delay for a while
1829 scavenge_counter_
= kDefaultReleaseDelay
;
1833 void TCMalloc_PageHeap::RegisterSizeClass(Span
* span
, size_t sc
) {
1834 // Associate span object with all interior pages as well
1835 ASSERT(!span
->free
);
1836 ASSERT(GetDescriptor(span
->start
) == span
);
1837 ASSERT(GetDescriptor(span
->start
+span
->length
-1) == span
);
1838 Event(span
, 'C', sc
);
1839 span
->sizeclass
= static_cast<unsigned int>(sc
);
1840 for (Length i
= 1; i
< span
->length
-1; i
++) {
1841 pagemap_
.set(span
->start
+i
, span
);
1846 size_t TCMalloc_PageHeap::ReturnedBytes() const {
1848 for (unsigned s
= 0; s
< kMaxPages
; s
++) {
1849 const int r_length
= DLL_Length(&free_
[s
].returned
);
1850 unsigned r_pages
= s
* r_length
;
1851 result
+= r_pages
<< kPageShift
;
1854 for (Span
* s
= large_
.returned
.next
; s
!= &large_
.returned
; s
= s
->next
)
1855 result
+= s
->length
<< kPageShift
;
1861 static double PagesToMB(uint64_t pages) {
1862   return (pages << kPageShift) / 1048576.0;
void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
  int nonempty_sizes = 0;
  for (int s = 0; s < kMaxPages; s++) {
    if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
      nonempty_sizes++;
    }
  }
  out->printf("------------------------------------------------\n");
  out->printf("PageHeap: %d sizes; %6.1f MB free\n",
              nonempty_sizes, PagesToMB(free_pages_));
  out->printf("------------------------------------------------\n");
  uint64_t total_normal = 0;
  uint64_t total_returned = 0;
  for (int s = 0; s < kMaxPages; s++) {
    const int n_length = DLL_Length(&free_[s].normal);
    const int r_length = DLL_Length(&free_[s].returned);
    if (n_length + r_length > 0) {
      uint64_t n_pages = s * n_length;
      uint64_t r_pages = s * r_length;
      total_normal += n_pages;
      total_returned += r_pages;
      out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
                  "; unmapped: %6.1f MB; %6.1f MB cum\n",
                  s,
                  (n_length + r_length),
                  PagesToMB(n_pages + r_pages),
                  PagesToMB(total_normal + total_returned),
                  PagesToMB(r_pages),
                  PagesToMB(total_returned));
    }
  }

  uint64_t n_pages = 0;
  uint64_t r_pages = 0;
  int n_spans = 0;
  int r_spans = 0;
  out->printf("Normal large spans:\n");
  for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
    out->printf("   [ %6" PRIuS " pages ] %6.1f MB\n",
                s->length, PagesToMB(s->length));
    n_pages += s->length;
    n_spans++;
  }
  out->printf("Unmapped large spans:\n");
  for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
    out->printf("   [ %6" PRIuS " pages ] %6.1f MB\n",
                s->length, PagesToMB(s->length));
    r_pages += s->length;
    r_spans++;
  }
  total_normal += n_pages;
  total_returned += r_pages;
  out->printf(">255   large * %6u spans ~ %6.1f MB; %6.1f MB cum"
              "; unmapped: %6.1f MB; %6.1f MB cum\n",
              (n_spans + r_spans),
              PagesToMB(n_pages + r_pages),
              PagesToMB(total_normal + total_returned),
              PagesToMB(r_pages),
              PagesToMB(total_returned));
}
bool TCMalloc_PageHeap::GrowHeap(Length n) {
  ASSERT(kMaxPages >= kMinSystemAlloc);
  if (n > kMaxValidPages) return false;
  Length ask = (n > kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
  size_t actual_size;
  void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
  if (ptr == NULL) {
    if (n < ask) {
      // Try growing just "n" pages
      ask = n;
      ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
    }
    if (ptr == NULL) return false;
  }
  ask = actual_size >> kPageShift;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  pages_committed_since_last_scavenge_ += ask;
#endif

  uint64_t old_system_bytes = system_bytes_;
  system_bytes_ += (ask << kPageShift);
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;

  // If we have already a lot of pages allocated, just pre allocate a bunch of
  // memory for the page map.  This prevents fragmentation by pagemap metadata
  // when a program keeps allocating and freeing large blocks.
  if (old_system_bytes < kPageMapBigAllocationThreshold
      && system_bytes_ >= kPageMapBigAllocationThreshold) {
    pagemap_.PreallocateMoreMemory();
  }

  // Make sure pagemap_ has entries for all of the new pages.
  // Plus ensure one before and one after so coalescing code
  // does not need bounds-checking.
  if (pagemap_.Ensure(p-1, ask+2)) {
    // Pretend the new area is allocated and then Delete() it to
    // cause any necessary coalescing to occur.
    //
    // We do not adjust free_pages_ here since Delete() will do it for us.
    Span* span = NewSpan(p, ask);
    RecordSpan(span);
    Delete(span);
    ASSERT(Check());
    return true;
  } else {
    // We could not allocate memory within "pagemap_"
    // TODO: Once we can return memory to the system, return the new span
    return false;
  }
}
bool TCMalloc_PageHeap::Check() {
  ASSERT(free_[0].normal.next == &free_[0].normal);
  ASSERT(free_[0].returned.next == &free_[0].returned);
  CheckList(&large_.normal, kMaxPages, 1000000000);
  CheckList(&large_.returned, kMaxPages, 1000000000);
  for (Length s = 1; s < kMaxPages; s++) {
    CheckList(&free_[s].normal, s, s);
    CheckList(&free_[s].returned, s, s);
  }
  return true;
}
#if ASSERT_DISABLED
bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) {
  return true;
}
#else
bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
  for (Span* s = list->next; s != list; s = s->next) {
    CHECK_CONDITION(s->free);
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(s->length <= max_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
  }
  return true;
}
#endif
static void ReleaseFreeList(Span* list, Span* returned) {
  // Walk backwards through list so that when we push these
  // spans on the "returned" list, we preserve the order.
  while (!DLL_IsEmpty(list)) {
    Span* s = list->prev;
    DLL_Remove(s);
    DLL_Prepend(returned, s);
    TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                           static_cast<size_t>(s->length << kPageShift));
  }
}

void TCMalloc_PageHeap::ReleaseFreePages() {
  for (Length s = 0; s < kMaxPages; s++) {
    ReleaseFreeList(&free_[s].normal, &free_[s].returned);
  }
  ReleaseFreeList(&large_.normal, &large_.returned);
  ASSERT(Check());
}
//-------------------------------------------------------------------
// Free list
//-------------------------------------------------------------------

class TCMalloc_ThreadCache_FreeList {
 private:
  void*    list_;       // Linked list of nodes
  uint16_t length_;     // Current length
  uint16_t lowater_;    // Low water mark for list length

 public:
  void Init() {
    list_ = NULL;
    length_ = 0;
    lowater_ = 0;
  }

  // Return current length of list
  int length() const {
    return length_;
  }

  bool empty() const {
    return list_ == NULL;
  }

  // Low-water mark management
  int lowwatermark() const { return lowater_; }
  void clear_lowwatermark() { lowater_ = length_; }

  ALWAYS_INLINE void Push(void* ptr) {
    SLL_Push(&list_, ptr);
    length_++;
  }

  void PushRange(int N, void *start, void *end) {
    SLL_PushRange(&list_, start, end);
    length_ = length_ + static_cast<uint16_t>(N);
  }

  void PopRange(int N, void **start, void **end) {
    SLL_PopRange(&list_, N, start, end);
    ASSERT(length_ >= N);
    length_ = length_ - static_cast<uint16_t>(N);
    if (length_ < lowater_) lowater_ = length_;
  }

  ALWAYS_INLINE void* Pop() {
    ASSERT(list_ != NULL);
    length_--;
    if (length_ < lowater_) lowater_ = length_;
    return SLL_Pop(&list_);
  }

  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
  {
      for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
          finder.visit(nextObject);
  }
};
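// Illustrative sketch (not part of the original code, hence the "#if 0"): the free
// list above is intrusive -- a freed object's first word holds the pointer to the
// next free object, which is what the SLL_* helpers manipulate, so no per-node
// metadata is allocated. The example function names below are hypothetical.
#if 0
static inline void examplePush(void** list, void* object)
{
    *reinterpret_cast<void**>(object) = *list;  // the link lives inside the free object
    *list = object;
}

static inline void* examplePop(void** list)
{
    void* object = *list;
    *list = *reinterpret_cast<void**>(object);  // follow the embedded link
    return object;
}
#endif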
//-------------------------------------------------------------------
// Data kept per thread
//-------------------------------------------------------------------

class TCMalloc_ThreadCache {
 private:
  typedef TCMalloc_ThreadCache_FreeList FreeList;
#if COMPILER(MSVC)
  typedef DWORD ThreadIdentifier;
#else
  typedef pthread_t ThreadIdentifier;
#endif

  size_t           size_;                // Combined size of data
  ThreadIdentifier tid_;                 // Which thread owns it
  bool             in_setspecific_;      // Called pthread_setspecific?
  FreeList         list_[kNumClasses];   // Array indexed by size-class

  // We sample allocations, biased by the size of the allocation
  uint32_t         rnd_;                 // Cheap random number generator
  size_t           bytes_until_sample_;  // Bytes until we sample next

  // Allocate a new heap. REQUIRES: pageheap_lock is held.
  static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);

  // Use only as pthread thread-specific destructor function.
  static void DestroyThreadCache(void* ptr);

 public:
  // All ThreadCache objects are kept in a linked list (for stats collection)
  TCMalloc_ThreadCache* next_;
  TCMalloc_ThreadCache* prev_;

  void Init(ThreadIdentifier tid);
  void Cleanup();

  // Accessors (mostly just for printing stats)
  int freelist_length(size_t cl) const { return list_[cl].length(); }

  // Total byte size in cache
  size_t Size() const { return size_; }

  void* Allocate(size_t size);
  void Deallocate(void* ptr, size_t size_class);

  void FetchFromCentralCache(size_t cl, size_t allocationSize);
  void ReleaseToCentralCache(size_t cl, int N);
  void Scavenge();
  void Print() const;

  // Record allocation of "k" bytes.  Return true iff allocation
  // should be sampled
  bool SampleAllocation(size_t k);

  // Pick next sampling point
  void PickNextSample(size_t k);

  static void                  InitModule();
  static void                  InitTSD();
  static TCMalloc_ThreadCache* GetThreadHeap();
  static TCMalloc_ThreadCache* GetCache();
  static TCMalloc_ThreadCache* GetCacheIfPresent();
  static TCMalloc_ThreadCache* CreateCacheIfNecessary();
  static void                  DeleteCache(TCMalloc_ThreadCache* heap);
  static void                  BecomeIdle();
  static void                  RecomputeThreadCacheSize();

  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
  {
      for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
          list_[sizeClass].enumerateFreeObjects(finder, reader);
  }
};
//-------------------------------------------------------------------
// Data kept per size-class in central cache
//-------------------------------------------------------------------

class TCMalloc_Central_FreeList {
 public:
  void Init(size_t cl);

  // These methods all do internal locking.

  // Insert the specified range into the central freelist.  N is the number of
  // elements in the range.
  void InsertRange(void *start, void *end, int N);

  // Returns the actual number of fetched elements into N.
  void RemoveRange(void **start, void **end, int *N);

  // Returns the number of free objects in cache.
  size_t length() {
    SpinLockHolder h(&lock_);
    return counter_;
  }

  // Returns the number of free objects in the transfer cache.
  int tc_length() {
    SpinLockHolder h(&lock_);
    return used_slots_ * num_objects_to_move[size_class_];
  }

  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
  {
    for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0))
      ASSERT(!span->objects);

    ASSERT(!nonempty_.objects);
    static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);

    Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
    Span* remoteSpan = nonempty_.next;

    for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) {
      for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
        finder.visit(nextObject);
    }
  }

 private:
  struct TCEntry {
    void* head;
    void* tail;
  };

  // REQUIRES: lock_ is held
  // Remove object from cache and return.
  // Return NULL if no free entries in cache.
  void* FetchFromSpans();

  // REQUIRES: lock_ is held
  // Remove object from cache and return.  Fetches
  // from pageheap if cache is empty.  Only returns
  // NULL on allocation failure.
  void* FetchFromSpansSafe();

  // REQUIRES: lock_ is held
  // Release a linked list of objects to spans.
  // May temporarily release lock_.
  void ReleaseListToSpans(void *start);

  // REQUIRES: lock_ is held
  // Release an object to spans.
  // May temporarily release lock_.
  void ReleaseToSpans(void* object);

  // REQUIRES: lock_ is held
  // Populate cache by fetching from the page heap.
  // May temporarily release lock_.
  void Populate();

  // REQUIRES: lock is held.
  // Tries to make room for a TCEntry.  If the cache is full it will try to
  // expand it at the cost of some other cache size.  Return false if there is
  // no space.
  bool MakeCacheSpace();

  // REQUIRES: lock_ for locked_size_class is held.
  // Picks a "random" size class to steal TCEntry slot from.  In reality it
  // just iterates over the sizeclasses but does so without taking a lock.
  // Returns true on success.
  // May temporarily lock a "random" size class.
  static bool EvictRandomSizeClass(size_t locked_size_class, bool force);

  // REQUIRES: lock_ is *not* held.
  // Tries to shrink the cache.  If force is true it will release objects to
  // spans if that allows it to shrink the cache.  Return false if it failed to
  // shrink the cache.  Decrements cache_size_ on success.
  // May temporarily take lock_.  If it takes lock_, the locked_size_class
  // lock is released to keep the thread from holding two size class locks
  // concurrently, which could lead to a deadlock.
  bool ShrinkCache(int locked_size_class, bool force);

  // This lock protects all the data members.  cached_entries and cache_size_
  // may be looked at without holding the lock.
  SpinLock lock_;

  // We keep linked lists of empty and non-empty spans.
  size_t size_class_;     // My size class
  Span   empty_;          // Dummy header for list of empty spans
  Span   nonempty_;       // Dummy header for list of non-empty spans
  size_t counter_;        // Number of free objects in cache entry

  // Here we reserve space for TCEntry cache slots.  Since one size class can
  // end up getting all the TCEntries quota in the system we just preallocate
  // sufficient number of entries here.
  TCEntry tc_slots_[kNumTransferEntries];

  // Number of currently used cached entries in tc_slots_.  This variable is
  // updated under a lock but can be read without one.
  int32_t used_slots_;
  // The current number of slots for this size class.  This is an
  // adaptive value that is increased if there is lots of traffic
  // on a given size class.
  int32_t cache_size_;
};

// Pad each CentralCache object to multiple of 64 bytes
class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
 private:
  char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
};
//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------

// Central cache -- a collection of free-lists, one per size-class.
// We have a separate lock per free-list to reduce contention.
static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];

// Page-level allocator
static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;

#if PLATFORM(ARM)
static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)] __attribute__((aligned));
#else
static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];
#endif

static bool phinited = false;

// Avoid extra level of indirection by making "pageheap" be just an alias
// of pageheap_memory.
typedef union {
    void* m_memory;
    TCMalloc_PageHeap* m_pageHeap;
} PageHeapUnion;

static inline TCMalloc_PageHeap* getPageHeap()
{
    PageHeapUnion u = { &pageheap_memory[0] };
    return u.m_pageHeap;
}

#define pageheap getPageHeap()
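// Illustrative sketch (not part of the original code, hence the "#if 0"): the same
// storage-aliasing trick in miniature. Raw, pointer-aligned static storage is
// viewed through a union, so there is no separate "pointer to the singleton"
// global and no extra load to reach the object. The object is expected to be
// constructed in that storage (e.g. with placement new) before first use.
// The Widget type and names below are hypothetical.
#if 0
struct Widget { int value; };

static void* widget_memory[(sizeof(Widget) + sizeof(void*) - 1) / sizeof(void*)];

static inline Widget* getWidget()
{
    union { void* memory; Widget* widget; } u = { &widget_memory[0] };
    return u.widget;
}
#endif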
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY

#if !HAVE(DISPATCH_H)

static void sleep(unsigned seconds)
{
    ::Sleep(seconds * 1000);
}

void TCMalloc_PageHeap::scavengerThread()
{
#if HAVE(PTHREAD_SETNAME_NP)
    pthread_setname_np("JavaScriptCore: FastMalloc scavenger");
#endif

    while (1) {
        if (!shouldContinueScavenging()) {
            pthread_mutex_lock(&m_scavengeMutex);
            m_scavengeThreadActive = false;
            // Block until there are enough freed pages to release back to the system.
            pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
            m_scavengeThreadActive = true;
            pthread_mutex_unlock(&m_scavengeMutex);
        }
        sleep(kScavengeTimerDelayInSeconds);
        {
            SpinLockHolder h(&pageheap_lock);
            pageheap->scavenge();
        }
    }
}

#else

void TCMalloc_PageHeap::periodicScavenge()
{
    {
        SpinLockHolder h(&pageheap_lock);
        pageheap->scavenge();
    }

    if (!shouldContinueScavenging()) {
        m_scavengingScheduled = false;
        dispatch_suspend(m_scavengeTimer);
    }
}

#endif // HAVE(DISPATCH_H)
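// Illustrative sketch (not part of the original code, hence the "#if 0"): the
// scavenger above is a standard condition-variable consumer. A producer flips a
// predicate and signals; the consumer re-checks the predicate under the mutex so
// spurious wakeups are harmless. All names below are hypothetical.
#if 0
#include <pthread.h>

static pthread_mutex_t exampleMutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  exampleCondition = PTHREAD_COND_INITIALIZER;
static bool exampleWorkAvailable = false;

static void exampleProducer()
{
    pthread_mutex_lock(&exampleMutex);
    exampleWorkAvailable = true;
    pthread_cond_signal(&exampleCondition);
    pthread_mutex_unlock(&exampleMutex);
}

static void exampleConsumer()
{
    pthread_mutex_lock(&exampleMutex);
    while (!exampleWorkAvailable)               // guard against spurious wakeups
        pthread_cond_wait(&exampleCondition, &exampleMutex);
    exampleWorkAvailable = false;
    pthread_mutex_unlock(&exampleMutex);
    // ... do the work outside the lock ...
}
#endif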
// If TLS is available, we also store a copy
// of the per-thread object in a __thread variable
// since __thread variables are faster to read
// than pthread_getspecific().  We still need
// pthread_setspecific() because __thread
// variables provide no way to run cleanup
// code when a thread is destroyed.
#ifdef HAVE_TLS
static __thread TCMalloc_ThreadCache *threadlocal_heap;
#endif

// Thread-specific key.  Initialization here is somewhat tricky
// because some Linux startup code invokes malloc() before it
// is in a good enough state to handle pthread_keycreate().
// Therefore, we use TSD keys only after tsd_inited is set to true.
// Until then, we use a slow path to get the heap object.
static bool tsd_inited = false;
static pthread_key_t heap_key;
#if COMPILER(MSVC)
DWORD tlsIndex = TLS_OUT_OF_INDEXES;
#endif

static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
{
    // We still do pthread_setspecific when using MSVC fast TLS to
    // benefit from the delete callback.
    pthread_setspecific(heap_key, heap);
#if COMPILER(MSVC)
    TlsSetValue(tlsIndex, heap);
#endif
}
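// Illustrative sketch (not part of the original code, hence the "#if 0"): the
// combination used above -- a __thread variable for cheap reads on every
// allocation, plus a pthread key whose only job is to run a destructor when the
// thread exits. All names below are hypothetical.
#if 0
#include <pthread.h>

struct PerThread { int data; };

static __thread PerThread* fastSlot;   // fast path: one TLS load
static pthread_key_t cleanupKey;       // exists only for its destructor callback

static void destroyPerThread(void* p)
{
    fastSlot = 0;                      // keep the fast path from handing out a dead object
    delete static_cast<PerThread*>(p);
}

static void installPerThread(PerThread* object)
{
    pthread_key_create(&cleanupKey, destroyPerThread);  // normally done once, at startup
    pthread_setspecific(cleanupKey, object);            // registers the destructor for this thread
    fastSlot = object;                                  // fast-path copy
}
#endif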
// Allocator for thread heaps
static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;

// Linked list of heap objects.  Protected by pageheap_lock.
static TCMalloc_ThreadCache* thread_heaps = NULL;
static int thread_heap_count = 0;

// Overall thread cache size.  Protected by pageheap_lock.
static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;

// Global per-thread cache size.  Writes are protected by
// pageheap_lock.  Reads are done without any locking, which should be
// fine as long as size_t can be written atomically and we don't place
// invariants between this variable and other pieces of state.
static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
//-------------------------------------------------------------------
// Central cache implementation
//-------------------------------------------------------------------

void TCMalloc_Central_FreeList::Init(size_t cl) {
  lock_.Init();
  size_class_ = cl;
  DLL_Init(&empty_);
  DLL_Init(&nonempty_);
  counter_ = 0;

  cache_size_ = 1;
  used_slots_ = 0;
  ASSERT(cache_size_ <= kNumTransferEntries);
}

void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
  while (start) {
    void *next = SLL_Next(start);
    ReleaseToSpans(start);
    start = next;
  }
}
ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);
  ASSERT(span != NULL);
  ASSERT(span->refcount > 0);

  // If span is empty, move it to non-empty list
  if (span->objects == NULL) {
    DLL_Remove(span);
    DLL_Prepend(&nonempty_, span);
    Event(span, 'N', 0);
  }

  // The following check is expensive, so it is disabled by default
  if (false) {
    // Check that object does not occur in list
    unsigned got = 0;
    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
      ASSERT(p != object);
      got++;
    }
    ASSERT(got + span->refcount ==
           (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
  }

  counter_++;
  span->refcount--;
  if (span->refcount == 0) {
    Event(span, '#', 0);
    counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
    DLL_Remove(span);

    // Release central list lock while operating on pageheap
    lock_.Unlock();
    {
      SpinLockHolder h(&pageheap_lock);
      pageheap->Delete(span);
    }
    lock_.Lock();
  } else {
    *(reinterpret_cast<void**>(object)) = span->objects;
    span->objects = object;
  }
}
ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
    size_t locked_size_class, bool force) {
  static int race_counter = 0;
  int t = race_counter++;  // Updated without a lock, but who cares.
  if (t >= static_cast<int>(kNumClasses)) {
    while (t >= static_cast<int>(kNumClasses)) {
      t -= kNumClasses;
    }
    race_counter = t;
  }
  ASSERT(t >= 0);
  ASSERT(t < static_cast<int>(kNumClasses));
  if (t == static_cast<int>(locked_size_class)) return false;
  return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
}
bool TCMalloc_Central_FreeList::MakeCacheSpace() {
  // Is there room in the cache?
  if (used_slots_ < cache_size_) return true;
  // Check if we can expand this cache?
  if (cache_size_ == kNumTransferEntries) return false;
  // Ok, we'll try to grab an entry from some other size class.
  if (EvictRandomSizeClass(size_class_, false) ||
      EvictRandomSizeClass(size_class_, true)) {
    // Succeeded in evicting, we're going to make our cache larger.
    cache_size_++;
    return true;
  }
  return false;
}
class LockInverter {
 private:
  SpinLock *held_, *temp_;
 public:
  inline explicit LockInverter(SpinLock* held, SpinLock *temp)
    : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
  inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
};
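// Illustrative sketch (not part of the original code, hence the "#if 0"): how
// LockInverter is meant to be used. The caller already holds one size-class
// lock; to touch another size class it drops its own lock first, so no thread
// ever holds two of these locks at once and no nesting order has to be defined.
#if 0
static void exampleNeedsOtherLock(SpinLock* alreadyHeld, SpinLock* wanted)
{
    // On entry: *alreadyHeld is held, *wanted is not.
    LockInverter inverter(alreadyHeld, wanted);
    // Here: *wanted is held and *alreadyHeld is not.
    // ... touch state protected by *wanted ...
}   // Destructor releases *wanted and re-acquires *alreadyHeld.
#endif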
bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
  // Start with a quick check without taking a lock.
  if (cache_size_ == 0) return false;
  // We don't evict from a full cache unless we are 'forcing'.
  if (force == false && used_slots_ == cache_size_) return false;

  // Grab lock, but first release the other lock held by this thread.  We use
  // the lock inverter to ensure that we never hold two size class locks
  // concurrently.  That can create a deadlock because there is no well
  // defined nesting order.
  LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
  ASSERT(used_slots_ <= cache_size_);
  ASSERT(0 <= cache_size_);
  if (cache_size_ == 0) return false;
  if (used_slots_ == cache_size_) {
    if (force == false) return false;
    // ReleaseListToSpans releases the lock, so we have to make all the
    // updates to the central list before calling it.
    cache_size_--;
    used_slots_--;
    ReleaseListToSpans(tc_slots_[used_slots_].head);
    return true;
  }
  cache_size_--;
  return true;
}
void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
  SpinLockHolder h(&lock_);
  if (N == num_objects_to_move[size_class_] &&
      MakeCacheSpace()) {
    int slot = used_slots_++;
    ASSERT(slot >= 0);
    ASSERT(slot < kNumTransferEntries);
    TCEntry *entry = &tc_slots_[slot];
    entry->head = start;
    entry->tail = end;
    return;
  }
  ReleaseListToSpans(start);
}
void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
  int num = *N;
  ASSERT(num > 0);

  SpinLockHolder h(&lock_);
  if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
    int slot = --used_slots_;
    ASSERT(slot >= 0);
    TCEntry *entry = &tc_slots_[slot];
    *start = entry->head;
    *end = entry->tail;
    return;
  }

  // TODO: Prefetch multiple TCEntries?
  void *tail = FetchFromSpansSafe();
  if (!tail) {
    // We are completely out of memory.
    *start = *end = NULL;
    *N = 0;
    return;
  }

  SLL_SetNext(tail, NULL);
  *start = tail;
  *end = tail;
  int count = 1;
  while (count < num) {
    void *t = FetchFromSpans();
    if (!t) break;
    SLL_Push(start, t);
    count++;
  }
  *N = count;
}
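// Illustrative sketch (not part of the original code, hence the "#if 0"): the
// transfer-cache idea used by InsertRange/RemoveRange above. A whole batch of
// objects is moved as a prepackaged chain, so handing a batch across costs one
// {head, tail} copy instead of one list operation per object. The example entry
// type and function names below are hypothetical.
#if 0
struct ExampleEntry { void* head; void* tail; };

// Stash a ready-made chain of exactly batch_size objects.
static void exampleStash(ExampleEntry* slot, void* head, void* tail)
{
    slot->head = head;   // first object in the chain
    slot->tail = tail;   // last object; its embedded next-pointer is NULL
}

// Hand the whole chain back out in O(1).
static void exampleTake(ExampleEntry* slot, void** head, void** tail)
{
    *head = slot->head;
    *tail = slot->tail;
}
#endif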
void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
  void *t = FetchFromSpans();
  if (!t) {
    Populate();
    t = FetchFromSpans();
  }
  return t;
}
void* TCMalloc_Central_FreeList::FetchFromSpans() {
  if (DLL_IsEmpty(&nonempty_)) return NULL;
  Span* span = nonempty_.next;

  ASSERT(span->objects != NULL);
  ASSERT_SPAN_COMMITTED(span);
  span->refcount++;
  void* result = span->objects;
  span->objects = *(reinterpret_cast<void**>(result));
  if (span->objects == NULL) {
    // Move to empty list
    DLL_Remove(span);
    DLL_Prepend(&empty_, span);
    Event(span, 'E', 0);
  }
  counter_--;
  return result;
}
// Fetch memory from the system and add to the central cache freelist.
ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
  // Release central list lock while operating on pageheap
  lock_.Unlock();
  const size_t npages = class_to_pages[size_class_];

  Span* span;
  {
    SpinLockHolder h(&pageheap_lock);
    span = pageheap->New(npages);
    if (span) pageheap->RegisterSizeClass(span, size_class_);
  }
  if (span == NULL) {
    MESSAGE("allocation failed: %d\n", errno);
    lock_.Lock();
    return;
  }
  ASSERT_SPAN_COMMITTED(span);
  ASSERT(span->length == npages);
  // Cache sizeclass info eagerly.  Locking is not necessary.
  // (Instead of being eager, we could just replace any stale info
  // about this span, but that seems to be no better in practice.)
  for (size_t i = 0; i < npages; i++) {
    pageheap->CacheSizeClass(span->start + i, size_class_);
  }

  // Split the block into pieces and add to the free-list
  // TODO: coloring of objects to avoid cache conflicts?
  int num = 0;
  char* nptr;
  void** tail = &span->objects;
  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
  char* limit = ptr + (npages << kPageShift);
  const size_t size = ByteSizeForClass(size_class_);
  while ((nptr = ptr + size) <= limit) {
    *tail = ptr;
    tail = reinterpret_cast<void**>(ptr);
    ptr = nptr;
    num++;
  }
  ASSERT(ptr <= limit);
  *tail = NULL;
  span->refcount = 0; // No sub-object in use yet

  // Add span to list of non-empty spans
  lock_.Lock();
  DLL_Prepend(&nonempty_, span);
  counter_ += num;
}
//-------------------------------------------------------------------
// TCMalloc_ThreadCache implementation
//-------------------------------------------------------------------

inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
  if (bytes_until_sample_ < k) {
    PickNextSample(k);
    return true;
  } else {
    bytes_until_sample_ -= k;
    return false;
  }
}
void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
  size_ = 0;
  next_ = NULL;
  prev_ = NULL;
  tid_  = tid;
  in_setspecific_ = false;
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    list_[cl].Init();
  }

  // Initialize RNG -- run it for a bit to get to good values
  bytes_until_sample_ = 0;
  rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
  for (int i = 0; i < 100; i++) {
    PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
  }
}
void TCMalloc_ThreadCache::Cleanup() {
  // Put unused memory back into central cache
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    if (list_[cl].length() > 0) {
      ReleaseToCentralCache(cl, list_[cl].length());
    }
  }
}
ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
  ASSERT(size <= kMaxSize);
  const size_t cl = SizeClass(size);
  FreeList* list = &list_[cl];
  size_t allocationSize = ByteSizeForClass(cl);
  if (list->empty()) {
    FetchFromCentralCache(cl, allocationSize);
    if (list->empty()) return NULL;
  }
  size_ -= allocationSize;
  return list->Pop();
}
inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
  size_ += ByteSizeForClass(cl);
  FreeList* list = &list_[cl];
  list->Push(ptr);
  // If enough data is free, put back into central cache
  if (list->length() > kMaxFreeListLength) {
    ReleaseToCentralCache(cl, num_objects_to_move[cl]);
  }
  if (size_ >= per_thread_cache_size) Scavenge();
}
// Remove some objects of class "cl" from central cache and add to thread heap
ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
  int fetch_count = num_objects_to_move[cl];
  void *start, *end;
  central_cache[cl].RemoveRange(&start, &end, &fetch_count);
  list_[cl].PushRange(fetch_count, start, end);
  size_ += allocationSize * fetch_count;
}
// Remove some objects of class "cl" from thread heap and add to central cache
inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
  ASSERT(N > 0);
  FreeList* src = &list_[cl];
  if (N > src->length()) N = src->length();
  size_ -= N*ByteSizeForClass(cl);

  // We return prepackaged chains of the correct size to the central cache.
  // TODO: Use the same format internally in the thread caches?
  int batch_size = num_objects_to_move[cl];
  while (N > batch_size) {
    void *head, *tail;
    src->PopRange(batch_size, &head, &tail);
    central_cache[cl].InsertRange(head, tail, batch_size);
    N -= batch_size;
  }
  void *head, *tail;
  src->PopRange(N, &head, &tail);
  central_cache[cl].InsertRange(head, tail, N);
}
// Release idle memory to the central cache
inline void TCMalloc_ThreadCache::Scavenge() {
  // If the low-water mark for the free list is L, it means we would
  // not have had to allocate anything from the central cache even if
  // we had reduced the free list size by L.  We aim to get closer to
  // that situation by dropping L/2 nodes from the free list.  This
  // may not release much memory, but if so we will call scavenge again
  // pretty soon and the low-water marks will be high on that call.
  //int64 start = CycleClock::Now();

  for (size_t cl = 0; cl < kNumClasses; cl++) {
    FreeList* list = &list_[cl];
    const int lowmark = list->lowwatermark();
    if (lowmark > 0) {
      const int drop = (lowmark > 1) ? lowmark/2 : 1;
      ReleaseToCentralCache(cl, drop);
    }
    list->clear_lowwatermark();
  }

  //int64 finish = CycleClock::Now();
  //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
}
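// Worked example (not part of the original code, hence the "#if 0") of the
// low-water-mark policy above, with hypothetical numbers: if a free list never
// dropped below 8 entries since the last scavenge, those 8 entries were pure
// surplus, so half of them (4) are returned to the central cache.
#if 0
static int exampleObjectsToDrop(int lowWaterMark)
{
    if (lowWaterMark == 0)
        return 0;                                    // list was fully drained at some point; keep it
    return (lowWaterMark > 1) ? lowWaterMark / 2 : 1;
}
// exampleObjectsToDrop(8) == 4
// exampleObjectsToDrop(1) == 1
// exampleObjectsToDrop(0) == 0
#endif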
void TCMalloc_ThreadCache::PickNextSample(size_t k) {
  // Make next "random" number
  // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
  static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
  uint32_t r = rnd_;
  rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);

  // Next point is "rnd_ % (sample_period)".  I.e., average
  // increment is "sample_period/2".
  const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
  static int last_flag_value = -1;

  if (flag_value != last_flag_value) {
    SpinLockHolder h(&sample_period_lock);
    int i;
    for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
      if (primes_list[i] >= flag_value) {
        break;
      }
    }
    sample_period = primes_list[i];
    last_flag_value = flag_value;
  }

  bytes_until_sample_ += rnd_ % sample_period;

  if (k > (static_cast<size_t>(-1) >> 2)) {
    // If the user has asked for a huge allocation then it is possible
    // for the code below to loop infinitely.  Just return (note that
    // this throws off the sampling accuracy somewhat, but a user who
    // is allocating more than 1G of memory at a time can live with a
    // minor inaccuracy in profiling of small allocations, and also
    // would rather not wait for the loop below to terminate).
    return;
  }

  while (bytes_until_sample_ < k) {
    // Increase bytes_until_sample_ by enough average sampling periods
    // (sample_period >> 1) to allow us to sample past the current
    // allocation.
    bytes_until_sample_ += (sample_period >> 1);
  }

  bytes_until_sample_ -= k;
}
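// Illustrative sketch (not part of the original code, hence the "#if 0"): one
// step of the 32-bit LFSR used above. Shifting left and folding the polynomial
// taps (bits 22, 2, 1, 0) back in whenever the old top bit was set walks a long
// pseudo-random cycle with just a shift and an XOR. The function name below is
// hypothetical.
#if 0
#include <stdint.h>

static inline uint32_t exampleLfsrStep(uint32_t state)
{
    const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
    // (int32_t >> 31) is 0 or all-ones, so the XOR happens only when bit 31 was set.
    return (state << 1) ^ ((static_cast<int32_t>(state) >> 31) & kPoly);
}
// The sampler then advances the next sample point by (state % sample_period),
// giving an average spacing of roughly sample_period / 2 allocated bytes.
#endif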
void TCMalloc_ThreadCache::InitModule() {
  // There is a slight potential race here because of double-checked
  // locking idiom.  However, as long as the program does a small
  // allocation before switching to multi-threaded mode, we will be
  // fine.  We increase the chances of doing such a small allocation
  // by doing one in the constructor of the module_enter_exit_hook
  // object declared below.
  SpinLockHolder h(&pageheap_lock);
  if (!phinited) {
    InitSizeClasses();
    threadheap_allocator.Init();
    span_allocator.Init();
    span_allocator.New(); // Reduce cache conflicts
    span_allocator.New(); // Reduce cache conflicts
    stacktrace_allocator.Init();
    DLL_Init(&sampled_objects);
    for (size_t i = 0; i < kNumClasses; ++i) {
      central_cache[i].Init(i);
    }
    new ((void*)pageheap_memory) TCMalloc_PageHeap;
    phinited = 1;
#if defined(WTF_CHANGES) && OS(DARWIN)
    FastMallocZone::init();
#endif
  }
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
  // Create the heap and add it to the linked list
  TCMalloc_ThreadCache *heap = threadheap_allocator.New();
  heap->Init(tid);
  heap->next_ = thread_heaps;
  heap->prev_ = NULL;
  if (thread_heaps != NULL) thread_heaps->prev_ = heap;
  thread_heaps = heap;
  thread_heap_count++;
  RecomputeThreadCacheSize();
  return heap;
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
#ifdef HAVE_TLS
  // __thread is faster, but only when the kernel supports it
  if (KernelSupportsTLS())
    return threadlocal_heap;
#elif COMPILER(MSVC)
  return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
#else
  return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
#endif
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
  TCMalloc_ThreadCache* ptr = NULL;
  if (!tsd_inited) {
    InitModule();
  } else {
    ptr = GetThreadHeap();
  }
  if (ptr == NULL) ptr = CreateCacheIfNecessary();
  return ptr;
}
// In deletion paths, we do not try to create a thread-cache.  This is
// because we may be in the thread destruction code and may have
// already cleaned up the cache for this thread.
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
  if (!tsd_inited) return NULL;
  void* const p = GetThreadHeap();
  return reinterpret_cast<TCMalloc_ThreadCache*>(p);
}
void TCMalloc_ThreadCache::InitTSD() {
  ASSERT(!tsd_inited);
  pthread_key_create(&heap_key, DestroyThreadCache);
#if COMPILER(MSVC)
  tlsIndex = TlsAlloc();
#endif
  tsd_inited = true;

#if !COMPILER(MSVC)
  // We may have used a fake pthread_t for the main thread.  Fix it.
  pthread_t zero;
  memset(&zero, 0, sizeof(zero));
#endif
#ifndef WTF_CHANGES
  SpinLockHolder h(&pageheap_lock);
#else
  ASSERT(pageheap_lock.IsHeld());
#endif
  for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
    if (h->tid_ == 0) {
      h->tid_ = GetCurrentThreadId();
    }
#else
    if (pthread_equal(h->tid_, zero)) {
      h->tid_ = pthread_self();
    }
#endif
  }
}
TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
  // Initialize per-thread data if necessary
  TCMalloc_ThreadCache* heap = NULL;
  {
    SpinLockHolder h(&pageheap_lock);

#if COMPILER(MSVC)
    DWORD me;
    if (!tsd_inited) {
      me = 0;
    } else {
      me = GetCurrentThreadId();
    }
#else
    // Early on in glibc's life, we cannot even call pthread_self()
    pthread_t me;
    if (!tsd_inited) {
      memset(&me, 0, sizeof(me));
    } else {
      me = pthread_self();
    }
#endif

    // This may be a recursive malloc call from pthread_setspecific()
    // In that case, the heap for this thread has already been created
    // and added to the linked list.  So we search for that first.
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
      if (h->tid_ == me) {
#else
      if (pthread_equal(h->tid_, me)) {
#endif
        heap = h;
        break;
      }
    }

    if (heap == NULL) heap = NewHeap(me);
  }

  // We call pthread_setspecific() outside the lock because it may
  // call malloc() recursively.  The recursive call will never get
  // here again because it will find the already allocated heap in the
  // linked list of heaps.
  if (!heap->in_setspecific_ && tsd_inited) {
    heap->in_setspecific_ = true;
    setThreadHeap(heap);
  }
  return heap;
}
void TCMalloc_ThreadCache::BecomeIdle() {
  if (!tsd_inited) return;              // No caches yet
  TCMalloc_ThreadCache* heap = GetThreadHeap();
  if (heap == NULL) return;             // No thread cache to remove
  if (heap->in_setspecific_) return;    // Do not disturb the active caller

  heap->in_setspecific_ = true;
  pthread_setspecific(heap_key, NULL);
#ifdef HAVE_TLS
  // Also update the copy in __thread
  threadlocal_heap = NULL;
#endif
  heap->in_setspecific_ = false;
  if (GetThreadHeap() == heap) {
    // Somehow heap got reinstated by a recursive call to malloc
    // from pthread_setspecific.  We give up in this case.
    return;
  }

  // We can now get rid of the heap
  DeleteCache(heap);
}
void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
  // Note that "ptr" cannot be NULL since pthread promises not
  // to invoke the destructor on NULL values, but for safety,
  // we check anyway.
  if (ptr == NULL) return;
#ifdef HAVE_TLS
  // Prevent fast path of GetThreadHeap() from returning heap.
  threadlocal_heap = NULL;
#endif
  DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
}
void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
  // Remove all memory from heap
  heap->Cleanup();

  // Remove from linked list
  SpinLockHolder h(&pageheap_lock);
  if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
  if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
  if (thread_heaps == heap) thread_heaps = heap->next_;
  thread_heap_count--;
  RecomputeThreadCacheSize();

  threadheap_allocator.Delete(heap);
}
void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
  // Divide available space across threads
  int n = thread_heap_count > 0 ? thread_heap_count : 1;
  size_t space = overall_thread_cache_size / n;

  // Limit to allowed range
  if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
  if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;

  per_thread_cache_size = space;
}
void TCMalloc_ThreadCache::Print() const {
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    MESSAGE("      %5" PRIuS " : %4d len; %4d lo\n",
            ByteSizeForClass(cl),
            list_[cl].length(),
            list_[cl].lowwatermark());
  }
}
// Extract interesting stats
struct TCMallocStats {
  uint64_t system_bytes;        // Bytes alloced from system
  uint64_t thread_bytes;        // Bytes in thread caches
  uint64_t central_bytes;       // Bytes in central cache
  uint64_t transfer_bytes;      // Bytes in central transfer cache
  uint64_t pageheap_bytes;      // Bytes in page heap
  uint64_t metadata_bytes;      // Bytes alloced for metadata
};
// Get stats into "r".  Also get per-size-class counts if class_count != NULL
static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
  r->central_bytes = 0;
  r->transfer_bytes = 0;
  for (int cl = 0; cl < kNumClasses; ++cl) {
    const int length = central_cache[cl].length();
    const int tc_length = central_cache[cl].tc_length();
    r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
    r->transfer_bytes +=
      static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
    if (class_count) class_count[cl] = length + tc_length;
  }

  // Add stats from per-thread heaps
  r->thread_bytes = 0;
  {
    SpinLockHolder h(&pageheap_lock);
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
      r->thread_bytes += h->Size();
      if (class_count) {
        for (size_t cl = 0; cl < kNumClasses; ++cl) {
          class_count[cl] += h->freelist_length(cl);
        }
      }
    }
  }

  {
    SpinLockHolder h(&pageheap_lock);
    r->system_bytes = pageheap->SystemBytes();
    r->metadata_bytes = metadata_system_bytes;
    r->pageheap_bytes = pageheap->FreeBytes();
  }
}
// WRITE stats to "out"
static void DumpStats(TCMalloc_Printer* out, int level) {
  TCMallocStats stats;
  uint64_t class_count[kNumClasses];
  ExtractStats(&stats, (level >= 2 ? class_count : NULL));

  if (level >= 2) {
    out->printf("------------------------------------------------\n");
    uint64_t cumulative = 0;
    for (int cl = 0; cl < kNumClasses; ++cl) {
      if (class_count[cl] > 0) {
        uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
        cumulative += class_bytes;
        out->printf("class %3d [ %8" PRIuS " bytes ] : "
                    "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
                    cl, ByteSizeForClass(cl),
                    class_count[cl],
                    class_bytes / 1048576.0,
                    cumulative / 1048576.0);
      }
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->Dump(out);
  }

  const uint64_t bytes_in_use = stats.system_bytes
                                - stats.pageheap_bytes
                                - stats.central_bytes
                                - stats.transfer_bytes
                                - stats.thread_bytes;

  out->printf("------------------------------------------------\n"
              "MALLOC: %12" PRIu64 " Heap size\n"
              "MALLOC: %12" PRIu64 " Bytes in use by application\n"
              "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
              "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
              "MALLOC: %12" PRIu64 " Spans in use\n"
              "MALLOC: %12" PRIu64 " Thread heaps in use\n"
              "MALLOC: %12" PRIu64 " Metadata allocated\n"
              "------------------------------------------------\n",
              stats.system_bytes,
              bytes_in_use,
              stats.pageheap_bytes,
              stats.central_bytes,
              stats.transfer_bytes,
              stats.thread_bytes,
              uint64_t(span_allocator.inuse()),
              uint64_t(threadheap_allocator.inuse()),
              stats.metadata_bytes);
}
static void PrintStats(int level) {
  const int kBufferSize = 16 << 10;
  char* buffer = new char[kBufferSize];
  TCMalloc_Printer printer(buffer, kBufferSize);
  DumpStats(&printer, level);
  write(STDERR_FILENO, buffer, strlen(buffer));
  delete[] buffer;
}
static void** DumpStackTraces() {
  // Count how much space we need
  int needed_slots = 0;
  {
    SpinLockHolder h(&pageheap_lock);
    for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
      StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
      needed_slots += 3 + stack->depth;
    }
    needed_slots += 100;            // Slop in case sample grows
    needed_slots += needed_slots/8; // An extra 12.5% slop
  }

  void** result = new void*[needed_slots];
  if (result == NULL) {
    MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
            needed_slots);
    return NULL;
  }

  SpinLockHolder h(&pageheap_lock);
  int used_slots = 0;
  for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
    ASSERT(used_slots < needed_slots);  // Need to leave room for terminator
    StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
    if (used_slots + 3 + stack->depth >= needed_slots) {
      // No more room
      break;
    }

    result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
    result[used_slots+1] = reinterpret_cast<void*>(stack->size);
    result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
    for (int d = 0; d < stack->depth; d++) {
      result[used_slots+3+d] = stack->stack[d];
    }
    used_slots += 3 + stack->depth;
  }
  result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
  return result;
}
// TCMalloc's support for extra malloc interfaces
class TCMallocImplementation : public MallocExtension {
 public:
  virtual void GetStats(char* buffer, int buffer_length) {
    ASSERT(buffer_length > 0);
    TCMalloc_Printer printer(buffer, buffer_length);

    // Print level one stats unless lots of space is available
    if (buffer_length < 10000) {
      DumpStats(&printer, 1);
    } else {
      DumpStats(&printer, 2);
    }
  }

  virtual void** ReadStackTraces() {
    return DumpStackTraces();
  }

  virtual bool GetNumericProperty(const char* name, size_t* value) {
    ASSERT(name != NULL);

    if (strcmp(name, "generic.current_allocated_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes
               - stats.thread_bytes
               - stats.central_bytes
               - stats.pageheap_bytes;
      return true;
    }

    if (strcmp(name, "generic.heap_size") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
      // We assume that bytes in the page heap are not fragmented too
      // badly, and are therefore available for allocation.
      SpinLockHolder l(&pageheap_lock);
      *value = pageheap->FreeBytes();
      return true;
    }

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(&pageheap_lock);
      *value = overall_thread_cache_size;
      return true;
    }

    if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.thread_bytes;
      return true;
    }

    return false;
  }

  virtual bool SetNumericProperty(const char* name, size_t value) {
    ASSERT(name != NULL);

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      // Clip the value to a reasonable range
      if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
      if (value > (1<<30)) value = (1<<30);     // Limit to 1GB

      SpinLockHolder l(&pageheap_lock);
      overall_thread_cache_size = static_cast<size_t>(value);
      TCMalloc_ThreadCache::RecomputeThreadCacheSize();
      return true;
    }

    return false;
  }

  virtual void MarkThreadIdle() {
    TCMalloc_ThreadCache::BecomeIdle();
  }

  virtual void ReleaseFreeMemory() {
    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
  }
};
// The constructor allocates an object to ensure that initialization
// runs before main(), and therefore we do not have a chance to become
// multi-threaded before initialization.  We also create the TSD key
// here.  Presumably by the time this constructor runs, glibc is in
// good enough shape to handle pthread_key_create().
//
// The constructor also takes the opportunity to tell STL to use
// tcmalloc.  We want to do this early, before construct time, so
// all user STL allocations go through tcmalloc (which works really
// well for STL).
//
// The destructor prints stats when the program exits.
class TCMallocGuard {
 public:
  TCMallocGuard() {
#ifdef HAVE_TLS    // this is true if the cc/ld/libc combo support TLS
    // Check whether the kernel also supports TLS (needs to happen at runtime)
    CheckIfKernelSupportsTLS();
#endif
#ifdef WIN32       // patch the windows VirtualAlloc, etc.
    PatchWindowsFunctions();    // defined in windows/patch_functions.cc
#endif
    TCMalloc_ThreadCache::InitTSD();
    MallocExtension::Register(new TCMallocImplementation);
  }

  ~TCMallocGuard() {
    const char* env = getenv("MALLOCSTATS");
    if (env != NULL) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);
    }
#ifdef WIN32
    UnpatchWindowsFunctions();
#endif
  }
};

static TCMallocGuard module_enter_exit_hook;
//-------------------------------------------------------------------
// Helpers for the exported routines below
//-------------------------------------------------------------------

static Span* DoSampledAllocation(size_t size) {

  // Grab the stack trace outside the heap lock
  StackTrace tmp;
  tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
  tmp.size = size;

  SpinLockHolder h(&pageheap_lock);
  // Allocate span
  Span *span = pageheap->New(pages(size == 0 ? 1 : size));
  if (span == NULL) {
    return NULL;
  }

  // Allocate stack trace
  StackTrace *stack = stacktrace_allocator.New();
  if (stack == NULL) {
    // Sampling failed because of lack of memory
    return span;
  }

  *stack = tmp;
  span->sample = 1;
  span->objects = stack;
  DLL_Prepend(&sampled_objects, span);

  return span;
}
static inline bool CheckCachedSizeClass(void *ptr) {
  PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cached_value = pageheap->GetSizeClassIfCached(p);
  return cached_value == 0 ||
      cached_value == pageheap->GetDescriptor(p)->sizeclass;
}

static inline void* CheckedMallocResult(void *result)
{
  ASSERT(result == 0 || CheckCachedSizeClass(result));
  return result;
}

static inline void* SpanToMallocResult(Span *span) {
  ASSERT_SPAN_COMMITTED(span);
  pageheap->CacheSizeClass(span->start, 0);
  return
      CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
}
template <bool crashOnFailure>
static ALWAYS_INLINE void* do_malloc(size_t size) {
  void* ret = NULL;

  ASSERT(!isForbidden());

  // The following call forces module initialization
  TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
  if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
    Span* span = DoSampledAllocation(size);
    if (span != NULL)
      ret = SpanToMallocResult(span);
  } else if (size > kMaxSize) {
    // Use page-level allocator
    SpinLockHolder h(&pageheap_lock);
    Span* span = pageheap->New(pages(size));
    if (span != NULL)
      ret = SpanToMallocResult(span);
  } else {
    // The common case, and also the simplest.  This just pops the
    // size-appropriate freelist, after replenishing it if it's empty.
    ret = CheckedMallocResult(heap->Allocate(size));
  }

  if (!ret) {
    if (crashOnFailure) // This branch should be optimized out by the compiler.
      CRASH();
  }
  return ret;
}

static ALWAYS_INLINE void do_free(void* ptr) {
  if (ptr == NULL) return;
  ASSERT(pageheap != NULL);  // Should not call free() before malloc()
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  Span* span = NULL;
  size_t cl = pageheap->GetSizeClassIfCached(p);

  if (cl == 0) {
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
  }
  if (cl != 0) {
#ifndef NO_TCMALLOC_SAMPLES
    ASSERT(!pageheap->GetDescriptor(p)->sample);
#endif
    TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
    if (heap != NULL) {
      heap->Deallocate(ptr, cl);
    } else {
      // Delete directly into central cache
      SLL_SetNext(ptr, NULL);
      central_cache[cl].InsertRange(ptr, ptr, 1);
    }
  } else {
    SpinLockHolder h(&pageheap_lock);
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span != NULL && span->start == p);
#ifndef NO_TCMALLOC_SAMPLES
    if (span->sample) {
      DLL_Remove(span);
      stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
      span->objects = NULL;
    }
#endif
    pageheap->Delete(span);
  }
}
// For use by exported routines below that want specific alignments
//
// Note: this code can be slow, and can significantly fragment memory.
// The expectation is that memalign/posix_memalign/valloc/pvalloc will
// not be invoked very often.  This requirement simplifies our
// implementation and allows us to tune for expected allocation
// patterns.
static void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();

  // Allocate at least one byte to avoid boundary conditions below
  if (size == 0) size = 1;

  if (size <= kMaxSize && align < kPageSize) {
    // Search through acceptable size classes looking for one with
    // enough alignment.  This depends on the fact that
    // InitSizeClasses() currently produces several size classes that
    // are aligned at powers of two.  We will waste time and space if
    // we miss in the size class array, but that is deemed acceptable
    // since memalign() should be used rarely.
    size_t cl = SizeClass(size);
    while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
      cl++;
    }
    if (cl < kNumClasses) {
      TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
      return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
    }
  }

  // We will allocate directly from the page heap
  SpinLockHolder h(&pageheap_lock);

  if (align <= kPageSize) {
    // Any page-level allocation will be fine
    // TODO: We could put the rest of this page in the appropriate
    // TODO: cache but it does not seem worth it.
    Span* span = pageheap->New(pages(size));
    return span == NULL ? NULL : SpanToMallocResult(span);
  }

  // Allocate extra pages and carve off an aligned portion
  const Length alloc = pages(size + align);
  Span* span = pageheap->New(alloc);
  if (span == NULL) return NULL;

  // Skip starting portion so that we end up aligned
  Length skip = 0;
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
    skip++;
  }
  ASSERT(skip < alloc);
  if (skip > 0) {
    Span* rest = pageheap->Split(span, skip);
    pageheap->Delete(span);
    span = rest;
  }

  // Skip trailing portion that we do not need to return
  const Length needed = pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    Span* trailer = pageheap->Split(span, needed);
    pageheap->Delete(trailer);
  }
  return SpanToMallocResult(span);
}
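// Illustrative sketch (not part of the original code, hence the "#if 0") of the
// page-level alignment strategy above: over-allocate by `align`, skip leading
// pages until the address is aligned, then trim the unused head and tail. The
// helper name and sample values below are hypothetical.
#if 0
#include <stdint.h>
#include <stddef.h>

// How many leading pages must be skipped so that (startPage + skip) << pageShift
// is aligned to `align` (a power of two larger than the page size)?
static size_t examplePagesToSkip(uintptr_t startPage, size_t pageShift, size_t align)
{
    size_t skip = 0;
    while ((((startPage + skip) << pageShift) & (align - 1)) != 0)
        skip++;
    return skip;
}
// With 4KB pages (pageShift == 12), startPage == 3 and align == 16384 (4 pages):
// page 3 sits at 0x3000, which is not 16KB-aligned, but page 4 (0x4000) is,
// so examplePagesToSkip(3, 12, 16384) == 1.
#endif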
// Helpers for use by exported routines below:

static inline void do_malloc_stats() {
  PrintStats(1);
}

static inline int do_mallopt(int, int) {
  return 1;     // Indicates error
}
3617 static inline struct mallinfo
do_mallinfo() {
3618 TCMallocStats stats
;
3619 ExtractStats(&stats
, NULL
);
3621 // Just some of the fields are filled in.
3622 struct mallinfo info
;
3623 memset(&info
, 0, sizeof(info
));
3625 // Unfortunately, the struct contains "int" field, so some of the
3626 // size values will be truncated.
3627 info
.arena
= static_cast<int>(stats
.system_bytes
);
3628 info
.fsmblks
= static_cast<int>(stats
.thread_bytes
3629 + stats
.central_bytes
3630 + stats
.transfer_bytes
);
3631 info
.fordblks
= static_cast<int>(stats
.pageheap_bytes
);
3632 info
.uordblks
= static_cast<int>(stats
.system_bytes
3633 - stats
.thread_bytes
3634 - stats
.central_bytes
3635 - stats
.transfer_bytes
3636 - stats
.pageheap_bytes
);
//-------------------------------------------------------------------
// Exported routines
//-------------------------------------------------------------------

// CAVEAT: The code structure below ensures that MallocHook methods are always
//         called from the stack frame of the invoked allocation function.
//         heap-checker.cc depends on this to start a stack trace from
//         the call to the (de)allocation function.

#define do_malloc do_malloc<crashOnFailure>

template <bool crashOnFailure>
void* malloc(size_t);

void* fastMalloc(size_t size)
{
    return malloc<true>(size);
}

TryMallocReturnValue tryFastMalloc(size_t size)
{
    return malloc<false>(size);
}

template <bool crashOnFailure>
void* malloc(size_t size) {
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= size)  // If overflow would occur...
        return 0;
    size += sizeof(AllocAlignmentInteger);
    void* result = do_malloc(size);
    if (!result)
        return 0;

    *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
    result = static_cast<AllocAlignmentInteger*>(result) + 1;
#else
    void* result = do_malloc(size);
#endif

    MallocHook::InvokeNewHook(result, size);
    return result;
}

void free(void* ptr) {
    MallocHook::InvokeDeleteHook(ptr);

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (!ptr)
        return;

    AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(ptr);
    if (*header != Internal::AllocTypeMalloc)
        Internal::fastMallocMatchFailed(ptr);
    do_free(header);
#else
    do_free(ptr);
#endif
}
template <bool crashOnFailure>
void* calloc(size_t, size_t);

void* fastCalloc(size_t n, size_t elem_size)
{
    return calloc<true>(n, elem_size);
}

TryMallocReturnValue tryFastCalloc(size_t n, size_t elem_size)
{
    return calloc<false>(n, elem_size);
}

template <bool crashOnFailure>
void* calloc(size_t n, size_t elem_size) {
    size_t totalBytes = n * elem_size;

    // Protect against overflow
    if (n > 1 && elem_size && (totalBytes / elem_size) != n)
        return 0;

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes)  // If overflow would occur...
        return 0;

    totalBytes += sizeof(AllocAlignmentInteger);
    void* result = do_malloc(totalBytes);
    if (!result)
        return 0;

    memset(result, 0, totalBytes);
    *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
    result = static_cast<AllocAlignmentInteger*>(result) + 1;
#else
    void* result = do_malloc(totalBytes);
    if (result != NULL) {
        memset(result, 0, totalBytes);
    }
#endif

    MallocHook::InvokeNewHook(result, totalBytes);
    return result;
}
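// Illustrative sketch (not part of the original code, hence the "#if 0"): the
// multiplication-overflow test used by calloc above. If n * elem_size wrapped
// around, dividing the product by elem_size no longer yields n, so the request
// is rejected instead of silently allocating a short buffer. The helper name
// below is hypothetical.
#if 0
#include <stddef.h>

static bool exampleMulWouldOverflow(size_t n, size_t elem_size)
{
    size_t total = n * elem_size;                   // may wrap modulo SIZE_MAX + 1
    return n > 1 && elem_size && (total / elem_size) != n;
}
// With a 32-bit size_t, exampleMulWouldOverflow(0x10000, 0x10001) is true:
// the 33-bit product wraps to 0x10000 and the division check no longer matches.
#endif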
// Since cfree isn't used anywhere, we don't compile it in.
void cfree(void* ptr) {
    MallocHook::InvokeDeleteHook(ptr);
    do_free(ptr);
}
template <bool crashOnFailure>
void* realloc(void*, size_t);

void* fastRealloc(void* old_ptr, size_t new_size)
{
    return realloc<true>(old_ptr, new_size);
}

TryMallocReturnValue tryFastRealloc(void* old_ptr, size_t new_size)
{
    return realloc<false>(old_ptr, new_size);
}

template <bool crashOnFailure>
void* realloc(void* old_ptr, size_t new_size) {
  if (old_ptr == NULL) {
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    void* result = malloc(new_size);
#else
    void* result = do_malloc(new_size);
    MallocHook::InvokeNewHook(result, new_size);
#endif
    return result;
  }
  if (new_size == 0) {
    MallocHook::InvokeDeleteHook(old_ptr);
    free(old_ptr);
    return NULL;
  }

#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
  if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= new_size)  // If overflow would occur...
    return 0;
  new_size += sizeof(AllocAlignmentInteger);
  AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(old_ptr);
  if (*header != Internal::AllocTypeMalloc)
    Internal::fastMallocMatchFailed(old_ptr);
  old_ptr = header;
#endif

  // Get the size of the old entry
  const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
  size_t cl = pageheap->GetSizeClassIfCached(p);
  Span *span = NULL;
  size_t old_size;
  if (cl == 0) {
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
  }
  if (cl != 0) {
    old_size = ByteSizeForClass(cl);
  } else {
    ASSERT(span != NULL);
    old_size = span->length << kPageShift;
  }

  // Reallocate if the new size is larger than the old size,
  // or if the new size is significantly smaller than the old size.
  if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
    // Need to reallocate
    void* new_ptr = do_malloc(new_size);
    if (new_ptr == NULL) {
      return NULL;
    }
    MallocHook::InvokeNewHook(new_ptr, new_size);
    memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
    MallocHook::InvokeDeleteHook(old_ptr);
    // We could use a variant of do_free() that leverages the fact
    // that we already know the sizeclass of old_ptr.  The benefit
    // would be small, so don't bother.
    do_free(old_ptr);
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    new_ptr = static_cast<AllocAlignmentInteger*>(new_ptr) + 1;
#endif
    return new_ptr;
  } else {
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
    old_ptr = static_cast<AllocAlignmentInteger*>(old_ptr) + 1; // Set old_ptr back to the user pointer.
#endif
    return old_ptr;
  }
}
static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;

static inline void* cpp_alloc(size_t size, bool nothrow) {
    for (;;) {
        void* p = do_malloc(size);
        if (p == NULL) { // allocation failed
            // Get the current new handler. NB: this function is not
            // thread-safe. We make a feeble stab at making it so here, but
            // this lock only protects against tcmalloc interfering with
            // itself, not with other libraries calling set_new_handler.
            std::new_handler nh;
            {
                SpinLockHolder h(&set_new_handler_lock);
                nh = std::set_new_handler(0);
                (void) std::set_new_handler(nh);
            }
            // If no new_handler is established, the allocation failed.
            if (!nh) {
                if (nothrow) return 0;
                throw std::bad_alloc();
            }
            // Otherwise, try the new_handler. If it returns, retry the
            // allocation. If it throws std::bad_alloc, fail the allocation.
            // if it throws something else, don't interfere.
            try {
                (*nh)();
            } catch (const std::bad_alloc&) {
                if (!nothrow) throw;
                return p;
            }
        } else { // allocation success
            return p;
        }
    }
}
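// Illustrative sketch, not part of the original file: a new_handler a client
// might install, showing how it interacts with the retry loop in cpp_alloc()
// above. The handler name is hypothetical. Compiled out.
#if 0
static void exampleNewHandler()
{
    // A useful handler frees up memory so the retried do_malloc() can succeed;
    // throwing std::bad_alloc from here would instead make cpp_alloc() fail.
    releaseFastMallocFreeMemory();
}
// Installed by the application: std::set_new_handler(exampleNewHandler);
#endif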
void* operator new(size_t size) {
    void* p = cpp_alloc(size, false);
    // We keep this next instruction out of cpp_alloc for a reason: when
    // it's in, and new just calls cpp_alloc, the optimizer may fold the
    // new call into cpp_alloc, which messes up our whole section-based
    // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
    // isn't the last thing this fn calls, and prevents the folding.
    MallocHook::InvokeNewHook(p, size);
    return p;
}

void* operator new(size_t size, const std::nothrow_t&) __THROW {
    void* p = cpp_alloc(size, true);
    MallocHook::InvokeNewHook(p, size);
    return p;
}

void operator delete(void* p) __THROW {
    MallocHook::InvokeDeleteHook(p);
    do_free(p);
}

void operator delete(void* p, const std::nothrow_t&) __THROW {
    MallocHook::InvokeDeleteHook(p);
    do_free(p);
}

void* operator new[](size_t size) {
    void* p = cpp_alloc(size, false);
    // We keep this next instruction out of cpp_alloc for a reason: when
    // it's in, and new just calls cpp_alloc, the optimizer may fold the
    // new call into cpp_alloc, which messes up our whole section-based
    // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
    // isn't the last thing this fn calls, and prevents the folding.
    MallocHook::InvokeNewHook(p, size);
    return p;
}

void* operator new[](size_t size, const std::nothrow_t&) __THROW {
    void* p = cpp_alloc(size, true);
    MallocHook::InvokeNewHook(p, size);
    return p;
}

void operator delete[](void* p) __THROW {
    MallocHook::InvokeDeleteHook(p);
    do_free(p);
}

void operator delete[](void* p, const std::nothrow_t&) __THROW {
    MallocHook::InvokeDeleteHook(p);
    do_free(p);
}
extern "C" void* memalign(size_t align, size_t size) __THROW {
    void* result = do_memalign(align, size);
    MallocHook::InvokeNewHook(result, size);
    return result;
}
extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
{
    if (((align % sizeof(void*)) != 0) ||
        ((align & (align - 1)) != 0) ||
        (align == 0)) {
        return EINVAL;
    }

    void* result = do_memalign(align, size);
    MallocHook::InvokeNewHook(result, size);
    if (result == NULL) {
        return ENOMEM;
    } else {
        *result_ptr = result;
        return 0;
    }
}
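// Worked example, illustrative and not from the original file, of the
// alignment checks in posix_memalign() above on an LP64 target
// (sizeof(void*) == 8): align == 32 passes (a multiple of 8 and a power of
// two); align == 24 fails the power-of-two test because 24 & 23 != 0;
// align == 4 fails because it is not a multiple of sizeof(void*). Invalid
// alignments return EINVAL without attempting an allocation.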
static size_t pagesize = 0;

extern "C" void* valloc(size_t size) __THROW {
    // Allocate page-aligned object of length >= size bytes
    if (pagesize == 0) pagesize = getpagesize();
    void* result = do_memalign(pagesize, size);
    MallocHook::InvokeNewHook(result, size);
    return result;
}

extern "C" void* pvalloc(size_t size) __THROW {
    // Round up size to a multiple of pagesize
    if (pagesize == 0) pagesize = getpagesize();
    size = (size + pagesize - 1) & ~(pagesize - 1);
    void* result = do_memalign(pagesize, size);
    MallocHook::InvokeNewHook(result, size);
    return result;
}
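// Worked example, illustrative and not from the original file, of the
// page-rounding arithmetic used by pvalloc() above, assuming pagesize == 4096:
//   size == 5000: (5000 + 4095) & ~4095 == 8192  (rounded up to two pages)
//   size == 4096: (4096 + 4095) & ~4095 == 4096  (already page-sized)
// The mask trick relies on pagesize being a power of two.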
extern "C" void malloc_stats(void) {
    do_malloc_stats();
}

extern "C" int mallopt(int cmd, int value) {
    return do_mallopt(cmd, value);
}

#ifdef HAVE_STRUCT_MALLINFO
extern "C" struct mallinfo mallinfo(void) {
    return do_mallinfo();
}
#endif
//-------------------------------------------------------------------
// Some library routines on RedHat 9 allocate memory using malloc()
// and free it using __libc_free() (or vice-versa). Since we provide
// our own implementations of malloc/free, we need to make sure that
// the __libc_XXX variants (defined as part of glibc) also point to
// the same implementations.
//-------------------------------------------------------------------
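// Illustrative sketch, not part of the original file, of the mismatch the
// aliases below guard against; the helper name is hypothetical. Compiled out.
#if 0
extern "C" void __libc_free(void*); // glibc-internal entry point
static void exampleMismatchedFree()
{
    void* p = malloc(16); // resolved to the malloc defined in this file
    __libc_free(p);       // without the alias, this would reach glibc's allocator
}
#endif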
#if defined(__GLIBC__)
extern "C" {
#if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
  // Potentially faster variants that use the gcc alias extension.
  // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
# define ALIAS(x) __attribute__ ((weak, alias (x)))
  void* __libc_malloc(size_t size)              ALIAS("malloc");
  void  __libc_free(void* ptr)                  ALIAS("free");
  void* __libc_realloc(void* ptr, size_t size)  ALIAS("realloc");
  void* __libc_calloc(size_t n, size_t size)    ALIAS("calloc");
  void  __libc_cfree(void* ptr)                 ALIAS("cfree");
  void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
  void* __libc_valloc(size_t size)              ALIAS("valloc");
  void* __libc_pvalloc(size_t size)             ALIAS("pvalloc");
  int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
# undef ALIAS
# else   /* not __GNUC__ */
  // Portable wrappers
  void* __libc_malloc(size_t size)              { return malloc(size);       }
  void  __libc_free(void* ptr)                  { free(ptr);                 }
  void* __libc_realloc(void* ptr, size_t size)  { return realloc(ptr, size); }
  void* __libc_calloc(size_t n, size_t size)    { return calloc(n, size);    }
  void  __libc_cfree(void* ptr)                 { cfree(ptr);                }
  void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
  void* __libc_valloc(size_t size)              { return valloc(size);       }
  void* __libc_pvalloc(size_t size)             { return pvalloc(size);      }
  int __posix_memalign(void** r, size_t a, size_t s) {
    return posix_memalign(r, a, s);
  }
# endif  /* __GNUC__ */
} // extern "C"
#endif   /* __GLIBC__ */
// Override __libc_memalign in libc on linux boxes specially.
// They have a bug in libc that causes them to (very rarely) allocate
// with __libc_memalign() yet deallocate with free() and the
// definitions above don't catch it.
// This function is an exception to the rule of calling MallocHook method
// from the stack frame of the allocation function;
// heap-checker handles this special case explicitly.
static void *MemalignOverride(size_t align, size_t size, const void *caller)
{
    void* result = do_memalign(align, size);
    MallocHook::InvokeNewHook(result, size);
    return result;
}
void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
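// Illustrative note, not part of the original file: with __memalign_hook set,
// allocations glibc makes internally through __libc_memalign() (for example
// while loading a shared object) are routed to MemalignOverride() above, so a
// later free() from application code is handled by this allocator rather than
// glibc's, keeping the allocate/free pairing consistent.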
#if defined(WTF_CHANGES) && OS(DARWIN)

class FreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    HashSet<void*> m_freeObjects;

public:
    FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }

    void visit(void* ptr) { m_freeObjects.add(ptr); }
    bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
    bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
    size_t freeObjectCount() const { return m_freeObjects.size(); }

    void findFreeObjects(TCMalloc_ThreadCache* threadCache)
    {
        for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
            threadCache->enumerateFreeObjects(*this, m_reader);
    }

    void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
    {
        for (unsigned i = 0; i < numSizes; i++)
            centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
    }
};
class PageMapFreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    FreeObjectFinder& m_freeObjectFinder;

public:
    PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
        : m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    int visit(void* ptr) const
    {
        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (span->free) {
            void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
            m_freeObjectFinder.visit(ptr);
        } else if (span->sizeclass) {
            // Walk the free list of the small-object span, keeping track of each object seen
            for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject)))
                m_freeObjectFinder.visit(nextObject);
        }
        return span->length;
    }
};
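// Illustrative sketch, not part of the original file, of the free-list
// encoding the walk above depends on: each free small object stores the
// address of the next free object in its own first word. The helper name is
// hypothetical. Compiled out.
#if 0
static void* nextFreeObject(void* freeObject)
{
    // In-process equivalent of *m_reader(reinterpret_cast<void**>(freeObject));
    // the RemoteMemoryReader indirection is only needed when inspecting
    // another task's heap.
    return *static_cast<void**>(freeObject);
}
#endif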
class PageMapMemoryUsageRecorder {
    task_t m_task;
    void* m_context;
    unsigned m_typeMask;
    vm_range_recorder_t* m_recorder;
    const RemoteMemoryReader& m_reader;
    const FreeObjectFinder& m_freeObjectFinder;

    HashSet<void*> m_seenPointers;
    Vector<Span*> m_coalescedSpans;

public:
    PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
        : m_task(task)
        , m_context(context)
        , m_typeMask(typeMask)
        , m_recorder(recorder)
        , m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    ~PageMapMemoryUsageRecorder()
    {
        ASSERT(!m_coalescedSpans.size());
    }

    void recordPendingRegions()
    {
        Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
        vm_range_t ptrRange = { m_coalescedSpans[0]->start << kPageShift, 0 };
        ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize);

        // Mark the memory region the spans represent as a candidate for containing pointers
        if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE)
            (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);

        if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
            m_coalescedSpans.clear();
            return;
        }

        Vector<vm_range_t, 1024> allocatedPointers;
        for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
            Span* theSpan = m_coalescedSpans[i];

            vm_address_t spanStartAddress = theSpan->start << kPageShift;
            vm_size_t spanSizeInBytes = theSpan->length * kPageSize;

            if (!theSpan->sizeclass) {
                // If it's an allocated large object span, mark it as in use
                if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
                    allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
            } else {
                const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);

                // Mark each allocated small object within the span as in use
                const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
                for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
                    if (!m_freeObjectFinder.isFreeObject(object))
                        allocatedPointers.append((vm_range_t){object, objectSize});
                }
            }
        }

        (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());

        m_coalescedSpans.clear();
    }

    int visit(void* ptr)
    {
        Span* span = m_reader(reinterpret_cast<Span*>(ptr));

        if (m_seenPointers.contains(ptr))
            return span->length;
        m_seenPointers.add(ptr);

        if (!m_coalescedSpans.size()) {
            m_coalescedSpans.append(span);
            return span->length;
        }

        Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
        vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
        vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;

        // If the new span is adjacent to the previous span, do nothing for now.
        vm_address_t spanStartAddress = span->start << kPageShift;
        if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
            m_coalescedSpans.append(span);
            return span->length;
        }

        // New span is not adjacent to previous span, so record the spans coalesced so far.
        recordPendingRegions();
        m_coalescedSpans.append(span);

        return span->length;
    }
};
class AdminRegionRecorder {
    task_t m_task;
    void* m_context;
    unsigned m_typeMask;
    vm_range_recorder_t* m_recorder;
    const RemoteMemoryReader& m_reader;

    Vector<vm_range_t, 1024> m_pendingRegions;

public:
    AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader)
        : m_task(task)
        , m_context(context)
        , m_typeMask(typeMask)
        , m_recorder(recorder)
        , m_reader(reader)
    { }

    void recordRegion(vm_address_t ptr, size_t size)
    {
        if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
            m_pendingRegions.append((vm_range_t){ ptr, size });
    }

    void visit(void *ptr, size_t size)
    {
        recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
    }

    void recordPendingRegions()
    {
        if (m_pendingRegions.size()) {
            (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
            m_pendingRegions.clear();
        }
    }

    ~AdminRegionRecorder()
    {
        ASSERT(!m_pendingRegions.size());
    }
};
kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
{
    RemoteMemoryReader memoryReader(task, reader);

    FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
    TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
    TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
    TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);

    TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);

    FreeObjectFinder finder(memoryReader);
    finder.findFreeObjects(threadHeaps);
    finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);

    TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
    PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
    pageMap->visitValues(pageMapFinder, memoryReader);

    PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
    pageMap->visitValues(usageRecorder, memoryReader);
    usageRecorder.recordPendingRegions();

    AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder, memoryReader);
    pageMap->visitAllocations(adminRegionRecorder, memoryReader);

    PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
    PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);

    spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
    pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);

    adminRegionRecorder.recordPendingRegions();

    return 0;
}
size_t FastMallocZone::size(malloc_zone_t*, const void*)
{
    return 0;
}

void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
{
    return 0;
}

void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
{
    return 0;
}

void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
{
    // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
    // is not in this zone. When this happens, the pointer being freed was not allocated by any
    // zone so we need to print a useful error for the application developer.
    malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
}

void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
{
    return 0;
}
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
    &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics

#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) || PLATFORM(IPHONE)
    , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
#endif

#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !defined(BUILDING_ON_SNOW_LEOPARD) && !OS(IPHONE_OS)
    , 0, 0, 0, 0 // These members will not be used unless the zone advertises itself as version seven or higher.
#endif
    };
FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
    : m_pageHeap(pageHeap)
    , m_threadHeaps(threadHeaps)
    , m_centralCaches(centralCaches)
    , m_spanAllocator(spanAllocator)
    , m_pageHeapAllocator(pageHeapAllocator)
{
    memset(&m_zone, 0, sizeof(m_zone));
    m_zone.zone_name = "JavaScriptCore FastMalloc";
    m_zone.size = &FastMallocZone::size;
    m_zone.malloc = &FastMallocZone::zoneMalloc;
    m_zone.calloc = &FastMallocZone::zoneCalloc;
    m_zone.realloc = &FastMallocZone::zoneRealloc;
    m_zone.free = &FastMallocZone::zoneFree;
    m_zone.valloc = &FastMallocZone::zoneValloc;
    m_zone.destroy = &FastMallocZone::zoneDestroy;
    m_zone.introspect = &jscore_fastmalloc_introspection;
    malloc_zone_register(&m_zone);
}

void FastMallocZone::init()
{
    static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
}

#endif // defined(WTF_CHANGES) && OS(DARWIN)
void releaseFastMallocFreeMemory()
{
    // Flush free pages in the current thread cache back to the page heap.
    // Low watermark mechanism in Scavenge() prevents full return on the first pass.
    // The second pass flushes everything.
    if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
        threadCache->Scavenge();
        threadCache->Scavenge();
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
}
FastMallocStatistics fastMallocStatistics()
{
    FastMallocStatistics statistics;

    SpinLockHolder lockHolder(&pageheap_lock);
    statistics.heapSize = static_cast<size_t>(pageheap->SystemBytes());
    statistics.freeSizeInHeap = static_cast<size_t>(pageheap->FreeBytes());
    statistics.returnedSize = pageheap->ReturnedBytes();
    statistics.freeSizeInCaches = 0;
    for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache; threadCache = threadCache->next_)
        statistics.freeSizeInCaches += threadCache->Size();

    for (unsigned cl = 0; cl < kNumClasses; ++cl) {
        const int length = central_cache[cl].length();
        const int tc_length = central_cache[cl].tc_length();
        statistics.freeSizeInCaches += ByteSizeForClass(cl) * (length + tc_length);
    }
    return statistics;
}

#endif // FORCE_SYSTEM_MALLOC