1 // Copyright (c) 2005, 2007, Google Inc.
2 // All rights reserved.
3 // Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
5 // Redistribution and use in source and binary forms, with or without
6 // modification, are permitted provided that the following conditions are
9 // * Redistributions of source code must retain the above copyright
10 // notice, this list of conditions and the following disclaimer.
11 // * Redistributions in binary form must reproduce the above
12 // copyright notice, this list of conditions and the following disclaimer
13 // in the documentation and/or other materials provided with the
15 // * Neither the name of Google Inc. nor the names of its
16 // contributors may be used to endorse or promote products derived from
17 // this software without specific prior written permission.
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 // Author: Sanjay Ghemawat <opensource@google.com>
34 // A malloc that uses a per-thread cache to satisfy small malloc requests.
35 // (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
37 // See doc/tcmalloc.html for a high-level
38 // description of how this malloc works.
41 // 1. The thread-specific lists are accessed without acquiring any locks.
42 // This is safe because each such list is only accessed by one thread.
43 // 2. We have a lock per central free-list, and hold it while manipulating
44 // the central free list for a particular size.
45 // 3. The central page allocator is protected by "pageheap_lock".
46 // 4. The pagemap (which maps from page-number to descriptor),
47 // can be read without holding any locks, and written while holding
48 // the "pageheap_lock".
49 // 5. To improve performance, a subset of the information one can get
50 // from the pagemap is cached in a data structure, pagemap_cache_,
51 // that atomically reads and writes its entries. This cache can be
52 // read and written without locking.
54 // This multi-threaded access to the pagemap is safe for fairly
55 // subtle reasons. We basically assume that when an object X is
56 // allocated by thread A and deallocated by thread B, there must
57 // have been appropriate synchronization in the handoff of object
58 // X from thread A to thread B. The same logic applies to pagemap_cache_.
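//
// As a concrete example of rules 2 and 3 above (an illustrative sketch, not
// text from the original design notes), code later in this file takes the
// page-heap lock through the RAII helper from TCSpinLock.h before touching
// the page-level allocator:
//
//     SpinLockHolder h(&pageheap_lock);
//     Span* span = pageheap->New(n);   // page-level allocation under the lock
//
// Each central free-list is guarded the same way by its own per-class
// SpinLock (see TCMalloc_Central_FreeList::lock_ below).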
60 // THE PAGEID-TO-SIZECLASS CACHE
61 // Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
62 // returns 0 for a particular PageID then that means "no information," not that
63 // the sizeclass is 0. The cache may have stale information for pages that do
64 // not hold the beginning of any free()'able object. Staleness is eliminated
65 // in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
66 // do_memalign() for all other relevant pages.
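//
// A sketch of the intended lookup pattern (illustrative only; the helpers
// used here are defined further below on TCMalloc_PageHeap):
//
//     size_t cl = pageheap->GetSizeClassIfCached(p);   // 0 means "no information"
//     if (cl == 0) {
//         Span* span = pageheap->GetDescriptor(p);     // authoritative pagemap lookup
//         cl = span->sizeclass;                        // still 0 for large objects
//     }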
68 // TODO: Bias reclamation to larger addresses
69 // TODO: implement mallinfo/mallopt
70 // TODO: Better testing
72 // 9/28/2003 (new page-level allocator replaces ptmalloc2):
73 // * malloc/free of small objects goes from ~300 ns to ~50 ns.
74 // * allocation of a reasonably complicated struct
75 // goes from about 1100 ns to about 300 ns.
78 #include "FastMalloc.h"
80 #include "Assertions.h"
81 #if ENABLE(JSC_MULTIPLE_THREADS)
85 #include <Availability.h>
87 #ifndef NO_TCMALLOC_SAMPLES
89 #define NO_TCMALLOC_SAMPLES
93 #if !defined(USE_SYSTEM_MALLOC) && defined(NDEBUG)
94 #define FORCE_SYSTEM_MALLOC 0
96 #define FORCE_SYSTEM_MALLOC 1
99 #define TCMALLOC_TRACK_DECOMMITED_SPANS (HAVE(VIRTUALALLOC))
104 #if ENABLE(JSC_MULTIPLE_THREADS)
105 static pthread_key_t isForbiddenKey;
106 static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
107 static void initializeIsForbiddenKey()
109     pthread_key_create(&isForbiddenKey, 0);
112 static bool isForbidden()
114     pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
115     return !!pthread_getspecific(isForbiddenKey);
118 void fastMallocForbid()
120     pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
121     pthread_setspecific(isForbiddenKey, &isForbiddenKey);
124 void fastMallocAllow()
126     pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
127     pthread_setspecific(isForbiddenKey, 0);
132 static bool staticIsForbidden;
133 static bool isForbidden()
135     return staticIsForbidden;
138 void fastMallocForbid()
140     staticIsForbidden = true;
143 void fastMallocAllow()
145     staticIsForbidden = false;
147 #endif // ENABLE(JSC_MULTIPLE_THREADS)
156 void* fastZeroedMalloc(size_t n)
158     void* result = fastMalloc(n);
159     memset(result, 0, n);
163 void* tryFastZeroedMalloc(size_t n)
165     void* result = tryFastMalloc(n);
168     memset(result, 0, n);
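// Illustrative caller pattern (not part of the original file): the "try"
// variant can return 0 when the underlying allocation fails, so callers are
// expected to check the result before using it. With a hypothetical bufferSize:
//
//     if (void* buffer = tryFastZeroedMalloc(bufferSize))
//         ; // use the zero-filled buffer
//     else
//         ; // recover from the allocation failure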
174 #if FORCE_SYSTEM_MALLOC
177 #if !PLATFORM(WIN_OS)
185 void* tryFastMalloc(size_t n)
187     ASSERT(!isForbidden());
191 void* fastMalloc(size_t n)
193     ASSERT(!isForbidden());
194     void* result = malloc(n);
200 void* tryFastCalloc(size_t n_elements, size_t element_size)
202     ASSERT(!isForbidden());
203     return calloc(n_elements, element_size);
206 void* fastCalloc(size_t n_elements, size_t element_size)
208     ASSERT(!isForbidden());
209     void* result = calloc(n_elements, element_size);
215 void fastFree(void* p)
217     ASSERT(!isForbidden());
221 void* tryFastRealloc(void* p, size_t n)
223     ASSERT(!isForbidden());
224     return realloc(p, n);
227 void* fastRealloc(void* p, size_t n)
229     ASSERT(!isForbidden());
230     void* result = realloc(p, n);
236 void releaseFastMallocFreeMemory() { }
238 FastMallocStatistics fastMallocStatistics()
240     FastMallocStatistics statistics = { 0, 0, 0, 0 };
247 // This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
248 // It will never be used in this case, so its type and value are less interesting than its presence.
249 extern "C" const int jscore_fastmalloc_introspection = 0;
252 #else // FORCE_SYSTEM_MALLOC
256 #elif HAVE(INTTYPES_H)
257 #include <inttypes.h>
259 #include <sys/types.h>
262 #include "AlwaysInline.h"
263 #include "Assertions.h"
264 #include "TCPackedCache.h"
265 #include "TCPageMap.h"
266 #include "TCSpinLock.h"
267 #include "TCSystemAlloc.h"
276 #ifndef WIN32_LEAN_AND_MEAN
277 #define WIN32_LEAN_AND_MEAN
285 #include "MallocZoneSupport.h"
286 #include <wtf/HashSet.h>
293 // Calling pthread_getspecific through a global function pointer is faster than a normal
294 // call to the function on Mac OS X, and it's used in performance-critical code. So we
295 // use a function pointer. But that's not necessarily faster on other platforms, and we had
296 // problems with this technique on Windows, so we'll do this only on Mac OS X.
298 static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
299 #define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
302 #define DEFINE_VARIABLE(type, name, value, meaning) \
303 namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
304 type FLAGS_##name(value); \
305 char FLAGS_no##name; \
307 using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
309 #define DEFINE_int64(name, value, meaning) \
310 DEFINE_VARIABLE(int64_t, name, value, meaning)
312 #define DEFINE_double(name, value, meaning) \
313 DEFINE_VARIABLE(double, name, value, meaning)
317 #define malloc fastMalloc
318 #define calloc fastCalloc
319 #define free fastFree
320 #define realloc fastRealloc
322 #define MESSAGE LOG_ERROR
323 #define CHECK_CONDITION ASSERT
326 class TCMalloc_PageHeap;
327 class TCMalloc_ThreadCache;
328 class TCMalloc_Central_FreeListPadded;
330 class FastMallocZone {
334     static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
335     static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
336     static boolean_t check(malloc_zone_t*) { return true; }
337     static void print(malloc_zone_t*, boolean_t) { }
338     static void log(malloc_zone_t*, void*) { }
339     static void forceLock(malloc_zone_t*) { }
340     static void forceUnlock(malloc_zone_t*) { }
341     static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }
344     FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*);
345     static size_t size(malloc_zone_t*, const void*);
346     static void* zoneMalloc(malloc_zone_t*, size_t);
347     static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
348     static void zoneFree(malloc_zone_t*, void*);
349     static void* zoneRealloc(malloc_zone_t*, void*, size_t);
350     static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
351     static void zoneDestroy(malloc_zone_t*) { }
353     malloc_zone_t m_zone;
354     TCMalloc_PageHeap* m_pageHeap;
355     TCMalloc_ThreadCache** m_threadHeaps;
356     TCMalloc_Central_FreeListPadded* m_centralCaches;
364 // This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
365 // you're porting to a system where you really can't get a stacktrace.
366 #ifdef NO_TCMALLOC_SAMPLES
367 // We use #define so code compiles even if you #include stacktrace.h somehow.
368 # define GetStackTrace(stack, depth, skip) (0)
370 # include <google/stacktrace.h>
374 // Even if we have support for thread-local storage in the compiler
375 // and linker, the OS may not support it. We need to check that at
376 // runtime. Right now, we have to keep a manual set of "bad" OSes.
377 #if defined(HAVE_TLS)
378 static bool kernel_supports_tls = false;      // be conservative
379 static inline bool KernelSupportsTLS() {
380     return kernel_supports_tls;
382 # if !HAVE_DECL_UNAME    // if too old for uname, probably too old for TLS
383 static void CheckIfKernelSupportsTLS() {
384     kernel_supports_tls = false;
387 # include <sys/utsname.h>    // DECL_UNAME checked for <sys/utsname.h> too
388 static void CheckIfKernelSupportsTLS() {
390     if (uname(&buf) != 0) {    // should be impossible
391         MESSAGE("uname failed, assuming no TLS support (errno=%d)\n", errno);
392         kernel_supports_tls = false;
393     } else if (strcasecmp(buf.sysname, "linux") == 0) {
394         // The Linux case: the first kernel to support TLS was 2.6.0
395         if (buf.release[0] < '2' && buf.release[1] == '.')    // 0.x or 1.x
396             kernel_supports_tls = false;
397         else if (buf.release[0] == '2' && buf.release[1] == '.' &&
398                  buf.release[2] >= '0' && buf.release[2] < '6' &&
399                  buf.release[3] == '.')                        // 2.0 - 2.5
400             kernel_supports_tls = false;
402             kernel_supports_tls = true;
403     } else {    // some other kernel; we'll be optimistic
404         kernel_supports_tls = true;
406     // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
408 # endif  // HAVE_DECL_UNAME
411 // __THROW is defined in glibc systems. It means, counter-intuitively,
412 // "This function will never throw an exception." It's an optional
413 // optimization tool, but we may need to use it to match glibc prototypes.
414 #ifndef __THROW // I guess we're not on a glibc system
415 # define __THROW // __THROW is just an optimization, so ok to make it ""
418 //-------------------------------------------------------------------
420 //-------------------------------------------------------------------
422 // Not all possible combinations of the following parameters make
423 // sense. In particular, if kMaxSize increases, you may have to
424 // increase kNumClasses as well.
425 static const size_t kPageShift  = 12;
426 static const size_t kPageSize   = 1 << kPageShift;
427 static const size_t kMaxSize    = 8u * kPageSize;
428 static const size_t kAlignShift = 3;
429 static const size_t kAlignment  = 1 << kAlignShift;
430 static const size_t kNumClasses = 68;
432 // Allocates a big block of memory for the pagemap once we reach more than
434 static const size_t kPageMapBigAllocationThreshold = 128 << 20;
436 // Minimum number of pages to fetch from the system at a time. Must be
437 // significantly bigger than kBlockSize to amortize system-call
438 // overhead, and also to reduce external fragmentation. Also, we
439 // should keep this value big because various incarnations of Linux
440 // have small limits on the number of mmap() regions per
442 static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
444 // Number of objects to move between a per-thread list and a central
445 // list in one shot. We want this to be not too small so we can
446 // amortize the lock overhead for accessing the central list. Making
447 // it too big may temporarily cause unnecessary memory wastage in the
448 // per-thread free list until the scavenger cleans up the list.
449 static int num_objects_to_move[kNumClasses];
451 // Maximum length we allow a per-thread free-list to have before we
452 // move objects from it into the corresponding central free-list. We
453 // want this big to avoid locking the central free-list too often. It
454 // should not hurt to make this list somewhat big because the
455 // scavenging code will shrink it down when its contents are not in use.
456 static const int kMaxFreeListLength = 256;
458 // Lower and upper bounds on the per-thread cache sizes
459 static const size_t kMinThreadCacheSize = kMaxSize * 2;
460 static const size_t kMaxThreadCacheSize = 512 * 1024;
462 // Default bound on the total amount of thread caches
463 static const size_t kDefaultOverallThreadCacheSize = 16 << 20;
465 // For all span-lengths < kMaxPages we keep an exact-size list.
466 // REQUIRED: kMaxPages >= kMinSystemAlloc;
467 static const size_t kMaxPages = kMinSystemAlloc;
469 /* The smallest prime > 2^n */
470 static int primes_list[] = {
471 // Small values might cause high rates of sampling
472 // and hence commented out.
473 // 2, 5, 11, 17, 37, 67, 131, 257,
474 // 521, 1031, 2053, 4099, 8209, 16411,
475 32771, 65537, 131101, 262147, 524309, 1048583,
476 2097169, 4194319, 8388617, 16777259, 33554467 };
478 // Twice the approximate gap between sampling actions.
479 // I.e., we take one sample approximately once every
480 // tcmalloc_sample_parameter/2
481 // bytes of allocation, i.e., ~ once every 128KB.
482 // Must be a prime number.
483 #ifdef NO_TCMALLOC_SAMPLES
484 DEFINE_int64(tcmalloc_sample_parameter, 0,
485              "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
486 static size_t sample_period = 0;
488 DEFINE_int64(tcmalloc_sample_parameter, 262147,
489              "Twice the approximate gap between sampling actions."
490              " Must be a prime number. Otherwise will be rounded up to a "
491              " larger prime number");
492 static size_t sample_period = 262147;
495 // Protects sample_period above
496 static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
498 // Parameters for controlling how fast memory is returned to the OS.
500 DEFINE_double(tcmalloc_release_rate, 1,
501               "Rate at which we release unused memory to the system. "
502               "Zero means we never release memory back to the system. "
503               "Increase this flag to return memory faster; decrease it "
504               "to return memory slower. Reasonable rates are in the "
507 //-------------------------------------------------------------------
508 // Mapping from size to size_class and vice versa
509 //-------------------------------------------------------------------
511 // Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
512 // array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
513 // So for these larger sizes we have an array indexed by ceil(size/128).
515 // We flatten both logical arrays into one physical array and use
516 // arithmetic to compute an appropriate index. The constants used by
517 // ClassIndex() were selected to make the flattening work.
520 // Size Expression Index
521 // -------------------------------------------------------
525 // 1024 (1024 + 7) / 8 128
526 // 1025 (1025 + 127 + (120<<7)) / 128 129
528 // 32768 (32768 + 127 + (120<<7)) / 128 376
529 static const size_t kMaxSmallSize = 1024;
530 static const int shift_amount[2] = { 3, 7 };  // For divides by 8 or 128
531 static const int add_amount[2] = { 7, 127 + (120 << 7) };
532 static unsigned char class_array[377];
534 // Compute index of the class_array[] entry for a given size
535 static inline int ClassIndex(size_t s) {
536     const int i = (s > kMaxSmallSize);
537     return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
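// Worked examples of the flattened index arithmetic above (illustrative):
//     ClassIndex(8)     == (8 + 7) >> 3                    == 1
//     ClassIndex(1024)  == (1024 + 7) >> 3                 == 128
//     ClassIndex(1025)  == (1025 + 127 + (120 << 7)) >> 7  == 129
//     ClassIndex(32768) == (32768 + 127 + (120 << 7)) >> 7 == 376
// which matches the table above and stays within class_array[377].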
540 // Mapping from size class to max size storable in that class
541 static size_t class_to_size[kNumClasses];
543 // Mapping from size class to number of pages to allocate at a time
544 static size_t class_to_pages[kNumClasses];
546 // TransferCache is used to cache transfers of num_objects_to_move[size_class]
547 // back and forth between thread caches and the central cache for a given size
550     void *head;    // Head of chain of objects.
551     void *tail;    // Tail of chain of objects.
553 // A central cache freelist can have anywhere from 0 to kNumTransferEntries
554 // slots to put linked-list chains into. To keep memory usage bounded, the total
555 // number of TCEntries across size classes is fixed. Currently each size
556 // class is initially given one TCEntry, which also means that the maximum any
557 // one class can have is kNumClasses.
558 static const int kNumTransferEntries = kNumClasses;
560 // Note: the following only works for "n"s that fit in 32-bits, but
561 // that is fine since we only use it for small sizes.
562 static inline int LgFloor(size_t n) {
564     for (int i = 4; i >= 0; --i) {
565         int shift = (1 << i);
566         size_t x = n >> shift;
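        // (Illustrative expected results for this loop: LgFloor(1) == 0,
        // LgFloor(16) == 4, LgFloor(17) == 4, LgFloor(4096) == 12.)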
576 // Some very basic linked list functions for dealing with using void * as
579 static inline void *SLL_Next(void *t) {
580     return *(reinterpret_cast<void**>(t));
583 static inline void SLL_SetNext(void *t, void *n) {
584     *(reinterpret_cast<void**>(t)) = n;
587 static inline void SLL_Push(void **list, void *element) {
588     SLL_SetNext(element, *list);
592 static inline void *SLL_Pop(void **list) {
593     void *result = *list;
594     *list = SLL_Next(*list);
599 // Remove N elements from a linked list to which head points. head will be
600 // modified to point to the new head. start and end will point to the first
601 // and last nodes of the range. Note that end will point to NULL after this
602 // function is called.
603 static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
611     for (int i = 1; i < N; ++i) {
617     *head = SLL_Next(tmp);
618     // Unlink range from list.
619     SLL_SetNext(tmp, NULL);
622 static inline void SLL_PushRange(void **head, void *start, void *end) {
624     SLL_SetNext(end, *head);
628 static inline size_t SLL_Size(void *head) {
632         head = SLL_Next(head);
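// Illustrative usage only (not from the original file): these helpers store the
// "next" pointer in the first word of each object, so a free list needs no
// separate node type. With a hypothetical object pointer:
//
//     void* freeList = NULL;
//     SLL_Push(&freeList, object);            // object's first word now links to NULL
//     void* recovered = SLL_Pop(&freeList);   // returns object; freeList is NULL again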
637 // Setup helper functions.
639 static ALWAYS_INLINE size_t SizeClass(size_t size) {
640     return class_array[ClassIndex(size)];
643 // Get the byte-size for a specified class
644 static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
645     return class_to_size[cl];
647 static int NumMoveSize(size_t size) {
648     if (size == 0) return 0;
649     // Use approx 64k transfers between thread and central caches.
650     int num = static_cast<int>(64.0 * 1024.0 / size);
651     if (num < 2) num = 2;
652     // Clamp well below kMaxFreeListLength to avoid ping-pong between central
653     // and thread caches.
654     if (num > static_cast<int>(0.8 * kMaxFreeListLength))
655         num = static_cast<int>(0.8 * kMaxFreeListLength);
657     // Also, avoid bringing too many objects into small-object free
658     // lists. There are lots of such lists, and if we allow each one to
659     // fetch too many at a time, we end up having to scavenge too often
660     // (especially when there are lots of threads and each thread gets a
661     // small allowance for its thread cache).
663     // TODO: Make thread cache free list sizes dynamic so that we do not
664     // have to equally divide a fixed resource amongst lots of threads.
665     if (num > 32) num = 32;
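// Example values of the clamping above (illustrative, using the 64 KB target):
//     NumMoveSize(8)     -> 64*1024/8 = 8192, clamped down to 32
//     NumMoveSize(4096)  -> 16
//     NumMoveSize(32768) -> 2 (the lower bound of 2 applies)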
670 // Initialize the mapping arrays
671 static void InitSizeClasses() {
672 // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
673 if (ClassIndex(0) < 0) {
674 MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
677 if (static_cast<size_t>(ClassIndex(kMaxSize
)) >= sizeof(class_array
)) {
678 MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize
));
682 // Compute the size classes we want to use
683 size_t sc
= 1; // Next size class to assign
684 unsigned char alignshift
= kAlignShift
;
686 for (size_t size
= kAlignment
; size
<= kMaxSize
; size
+= (1 << alignshift
)) {
687 int lg
= LgFloor(size
);
689 // Increase alignment every so often.
691 // Since we double the alignment every time size doubles and
692 // size >= 128, this means that space wasted due to alignment is
693 // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
694 // bytes, so the space wasted as a percentage starts falling for
696 if ((lg
>= 7) && (alignshift
< 8)) {
702 // Allocate enough pages so leftover is less than 1/8 of total.
703 // This bounds wasted space to at most 12.5%.
704 size_t psize
= kPageSize
;
705 while ((psize
% size
) > (psize
>> 3)) {
708 const size_t my_pages
= psize
>> kPageShift
;
710 if (sc
> 1 && my_pages
== class_to_pages
[sc
-1]) {
711 // See if we can merge this into the previous class without
712 // increasing the fragmentation of the previous class.
713 const size_t my_objects
= (my_pages
<< kPageShift
) / size
;
714 const size_t prev_objects
= (class_to_pages
[sc
-1] << kPageShift
)
715 / class_to_size
[sc
-1];
716 if (my_objects
== prev_objects
) {
717 // Adjust last class to include this size
718 class_to_size
[sc
-1] = size
;
724 class_to_pages
[sc
] = my_pages
;
725 class_to_size
[sc
] = size
;
728 if (sc
!= kNumClasses
) {
729 MESSAGE("wrong number of size classes: found %" PRIuS
" instead of %d\n",
730 sc
, int(kNumClasses
));
734 // Initialize the mapping arrays
736 for (unsigned char c
= 1; c
< kNumClasses
; c
++) {
737 const size_t max_size_in_class
= class_to_size
[c
];
738 for (size_t s
= next_size
; s
<= max_size_in_class
; s
+= kAlignment
) {
739 class_array
[ClassIndex(s
)] = c
;
741 next_size
= static_cast<int>(max_size_in_class
+ kAlignment
);
744 // Double-check sizes just to be safe
745 for (size_t size
= 0; size
<= kMaxSize
; size
++) {
746 const size_t sc
= SizeClass(size
);
748 MESSAGE("Bad size class %" PRIuS
" for %" PRIuS
"\n", sc
, size
);
751 if (sc
> 1 && size
<= class_to_size
[sc
-1]) {
752 MESSAGE("Allocating unnecessarily large class %" PRIuS
" for %" PRIuS
756 if (sc
>= kNumClasses
) {
757 MESSAGE("Bad size class %" PRIuS
" for %" PRIuS
"\n", sc
, size
);
760 const size_t s
= class_to_size
[sc
];
762 MESSAGE("Bad size %" PRIuS
" for %" PRIuS
" (sc = %" PRIuS
")\n", s
, size
, sc
);
766 MESSAGE("Bad size %" PRIuS
" for %" PRIuS
" (sc = %" PRIuS
")\n", s
, size
, sc
);
771 // Initialize the num_objects_to_move array.
772 for (size_t cl
= 1; cl
< kNumClasses
; ++cl
) {
773 num_objects_to_move
[cl
] = NumMoveSize(ByteSizeForClass(cl
));
778 // Dump class sizes and maximum external wastage per size class
779 for (size_t cl
= 1; cl
< kNumClasses
; ++cl
) {
780 const int alloc_size
= class_to_pages
[cl
] << kPageShift
;
781 const int alloc_objs
= alloc_size
/ class_to_size
[cl
];
782 const int min_used
= (class_to_size
[cl
-1] + 1) * alloc_objs
;
783 const int max_waste
= alloc_size
- min_used
;
784 MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
786 int(class_to_size
[cl
-1] + 1),
787 int(class_to_size
[cl
]),
788 int(class_to_pages
[cl
] << kPageShift
),
789 max_waste
* 100.0 / alloc_size
796 // -------------------------------------------------------------------------
797 // Simple allocator for objects of a specified type. External locking
798 // is required before accessing one of these objects.
799 // -------------------------------------------------------------------------
801 // Metadata allocator -- keeps stats about how many bytes allocated
802 static uint64_t metadata_system_bytes = 0;
803 static void* MetaDataAlloc(size_t bytes) {
804     void* result = TCMalloc_SystemAlloc(bytes, 0);
805     if (result != NULL) {
806         metadata_system_bytes += bytes;
812 class PageHeapAllocator {
814     // How much to allocate from system at a time
815     static const size_t kAllocIncrement = 32 << 10;
818     static const size_t kAlignedSize
819         = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
821     // Free area from which to carve new objects
825     // Free list of already carved objects
828     // Number of allocated but unfreed objects
833         ASSERT(kAlignedSize <= kAllocIncrement);
843         if (free_list_ != NULL) {
845             free_list_ = *(reinterpret_cast<void**>(result));
847             if (free_avail_ < kAlignedSize) {
849                 free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
850                 if (free_area_ == NULL) CRASH();
851                 free_avail_ = kAllocIncrement;
854             free_area_ += kAlignedSize;
855             free_avail_ -= kAlignedSize;
858         return reinterpret_cast<T*>(result);
862         *(reinterpret_cast<void**>(p)) = free_list_;
867     int inuse() const { return inuse_; }
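// Illustrative usage only (not part of the original file): span_allocator,
// declared further below, is an instance of this class, and the "external
// locking" required by the section comment above is pageheap_lock for it:
//
//     SpinLockHolder h(&pageheap_lock);
//     Span* span = span_allocator.New();
//     // ... use the span descriptor ...
//     span_allocator.Delete(span);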
870 // -------------------------------------------------------------------------
871 // Span - a contiguous run of pages
872 // -------------------------------------------------------------------------
874 // Type that can hold a page number
875 typedef uintptr_t PageID;
877 // Type that can hold the length of a run of pages
878 typedef uintptr_t Length;
880 static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
882 // Convert byte size into pages. This won't overflow, but may return
883 // an unreasonably large value if bytes is huge enough.
884 static inline Length pages(size_t bytes) {
885     return (bytes >> kPageShift) +
886            ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
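// Worked example (illustrative), with kPageShift == 12, i.e. 4 KB pages:
//     pages(0) == 0, pages(1) == 1, pages(4096) == 1, pages(4097) == 2.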
889 // Convert a user size into the number of bytes that will actually be
891 static size_t AllocationSize(size_t bytes) {
892     if (bytes > kMaxSize) {
893         // Large object: we allocate an integral number of pages
894         ASSERT(bytes <= (kMaxValidPages << kPageShift));
895         return pages(bytes) << kPageShift;
897     // Small object: find the size class to which it belongs
898     return ByteSizeForClass(SizeClass(bytes));
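// Worked example (illustrative): with kMaxSize == 8 * kPageSize == 32768, a
// request for 40000 bytes is a large object and rounds up to
// pages(40000) << kPageShift == 10 pages == 40960 bytes, while a small request
// such as 10 bytes rounds up to the byte size of its size class.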
902 // Information kept for a span (a contiguous run of pages).
904     PageID start;                // Starting page number
905     Length length;               // Number of pages in span
906     Span* next;                  // Used when in linked list
907     Span* prev;                  // Used when in linked list
908     void* objects;               // Linked list of free objects
909     unsigned int free : 1;       // Is the span free
910 #ifndef NO_TCMALLOC_SAMPLES
911     unsigned int sample : 1;     // Sampled object?
913     unsigned int sizeclass : 8;  // Size-class for small objects (or 0)
914     unsigned int refcount : 11;  // Number of non-free objects
915     bool decommitted : 1;
919 // For debugging, we can keep a log of events per span
926 #if TCMALLOC_TRACK_DECOMMITED_SPANS
927 #define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)
929 #define ASSERT_SPAN_COMMITTED(span)
933 void Event(Span
* span
, char op
, int v
= 0) {
934 span
->history
[span
->nexthistory
] = op
;
935 span
->value
[span
->nexthistory
] = v
;
937 if (span
->nexthistory
== sizeof(span
->history
)) span
->nexthistory
= 0;
940 #define Event(s,o,v) ((void) 0)
943 // Allocator/deallocator for spans
944 static PageHeapAllocator
<Span
> span_allocator
;
945 static Span
* NewSpan(PageID p
, Length len
) {
946 Span
* result
= span_allocator
.New();
947 memset(result
, 0, sizeof(*result
));
949 result
->length
= len
;
951 result
->nexthistory
= 0;
956 static inline void DeleteSpan(Span
* span
) {
958 // In debug mode, trash the contents of deleted Spans
959 memset(span
, 0x3f, sizeof(*span
));
961 span_allocator
.Delete(span
);
964 // -------------------------------------------------------------------------
965 // Doubly linked list of spans.
966 // -------------------------------------------------------------------------
968 static inline void DLL_Init(Span
* list
) {
973 static inline void DLL_Remove(Span
* span
) {
974 span
->prev
->next
= span
->next
;
975 span
->next
->prev
= span
->prev
;
980 static ALWAYS_INLINE
bool DLL_IsEmpty(const Span
* list
) {
981 return list
->next
== list
;
984 static int DLL_Length(const Span
* list
) {
986 for (Span
* s
= list
->next
; s
!= list
; s
= s
->next
) {
992 #if 0 /* Not needed at the moment -- causes compiler warnings if not used */
993 static void DLL_Print(const char* label
, const Span
* list
) {
994 MESSAGE("%-10s %p:", label
, list
);
995 for (const Span
* s
= list
->next
; s
!= list
; s
= s
->next
) {
996 MESSAGE(" <%p,%u,%u>", s
, s
->start
, s
->length
);
1002 static inline void DLL_Prepend(Span
* list
, Span
* span
) {
1003 ASSERT(span
->next
== NULL
);
1004 ASSERT(span
->prev
== NULL
);
1005 span
->next
= list
->next
;
1007 list
->next
->prev
= span
;
1011 // -------------------------------------------------------------------------
1012 // Stack traces kept for sampled allocations
1013 // The following state is protected by pageheap_lock_.
1014 // -------------------------------------------------------------------------
1016 // size/depth are made the same size as a pointer so that some generic
1017 // code below can conveniently cast them back and forth to void*.
1018 static const int kMaxStackDepth
= 31;
1020 uintptr_t size
; // Size of object
1021 uintptr_t depth
; // Number of PC values stored in array below
1022 void* stack
[kMaxStackDepth
];
1024 static PageHeapAllocator
<StackTrace
> stacktrace_allocator
;
1025 static Span sampled_objects
;
1027 // -------------------------------------------------------------------------
1028 // Map from page-id to per-page data
1029 // -------------------------------------------------------------------------
1031 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
1032 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
1033 // because sometimes the sizeclass is all the information we need.
1035 // Selector class -- general selector uses 3-level map
1036 template <int BITS> class MapSelector {
1038     typedef TCMalloc_PageMap3<BITS - kPageShift> Type;
1039     typedef PackedCache<BITS, uint64_t> CacheType;
1042 #if defined(WTF_CHANGES)
1043 #if PLATFORM(X86_64)
1044 // On all known X86-64 platforms, the upper 16 bits are always unused and therefore
1045 // can be excluded from the PageMap key.
1046 // See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
1048 static const size_t kBitsUnusedOn64Bit = 16;
1050 static const size_t kBitsUnusedOn64Bit = 0;
1053 // A three-level map for 64-bit machines
1054 template <> class MapSelector<64> {
1056     typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
1057     typedef PackedCache<64, uint64_t> CacheType;
1061 // A two-level map for 32-bit machines
1062 template <> class MapSelector<32> {
1064     typedef TCMalloc_PageMap2<32 - kPageShift> Type;
1065     typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
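// Worked example (illustrative): with kPageShift == 12 and the 16 unused upper
// bits excluded on X86_64, the 64-bit PageMap key is 64 - 12 - 16 = 36 bits
// wide; the two-level 32-bit map uses 32 - 12 = 20 key bits.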
1068 // -------------------------------------------------------------------------
1069 // Page-level allocator
1070 // * Eager coalescing
1072 // Heap for page-level allocation. We allow allocating and freeing a
1073 // contiguous run of pages (called a "span").
1074 // -------------------------------------------------------------------------
1076 class TCMalloc_PageHeap
{
1080 // Allocate a run of "n" pages. Returns zero if out of memory.
1081 Span
* New(Length n
);
1083 // Delete the span "[p, p+n-1]".
1084 // REQUIRES: span was returned by earlier call to New() and
1085 // has not yet been deleted.
1086 void Delete(Span
* span
);
1088 // Mark an allocated span as being used for small objects of the
1089 // specified size-class.
1090 // REQUIRES: span was returned by an earlier call to New()
1091 // and has not yet been deleted.
1092 void RegisterSizeClass(Span
* span
, size_t sc
);
1094 // Split an allocated span into two spans: one of length "n" pages
1095 // followed by another span of length "span->length - n" pages.
1096 // Modifies "*span" to point to the first span of length "n" pages.
1097 // Returns a pointer to the second span.
1099 // REQUIRES: "0 < n < span->length"
1100 // REQUIRES: !span->free
1101 // REQUIRES: span->sizeclass == 0
1102 Span
* Split(Span
* span
, Length n
);
1104 // Return the descriptor for the specified page.
1105 inline Span
* GetDescriptor(PageID p
) const {
1106 return reinterpret_cast<Span
*>(pagemap_
.get(p
));
1110 inline Span
* GetDescriptorEnsureSafe(PageID p
)
1112 pagemap_
.Ensure(p
, 1);
1113 return GetDescriptor(p
);
1116 size_t ReturnedBytes() const;
1119 // Dump state to stderr
1121 void Dump(TCMalloc_Printer
* out
);
1124 // Return number of bytes allocated from system
1125 inline uint64_t SystemBytes() const { return system_bytes_
; }
1127 // Return number of free bytes in heap
1128 uint64_t FreeBytes() const {
1129 return (static_cast<uint64_t>(free_pages_
) << kPageShift
);
1133 bool CheckList(Span
* list
, Length min_pages
, Length max_pages
);
1135 // Release all pages on the free list for reuse by the OS:
1136 void ReleaseFreePages();
1138 // Return 0 if we have no information, or else the correct sizeclass for p.
1139 // Reads and writes to pagemap_cache_ do not require locking.
1140 // The entries are 64 bits on 64-bit hardware and 16 bits on
1141 // 32-bit hardware, and we don't mind raciness as long as each read of
1142 // an entry yields a valid entry, not a partially updated entry.
1143 size_t GetSizeClassIfCached(PageID p
) const {
1144 return pagemap_cache_
.GetOrDefault(p
, 0);
1146 void CacheSizeClass(PageID p
, size_t cl
) const { pagemap_cache_
.Put(p
, cl
); }
1149 // Pick the appropriate map and cache types based on pointer size
1150 typedef MapSelector
<8*sizeof(uintptr_t)>::Type PageMap
;
1151 typedef MapSelector
<8*sizeof(uintptr_t)>::CacheType PageMapCache
;
1153 mutable PageMapCache pagemap_cache_
;
1155 // We segregate spans of a given size into two circular linked
1156 // lists: one for normal spans, and one for spans whose memory
1157 // has been returned to the system.
1163 // List of free spans of length >= kMaxPages
1166 // Array mapping from span length to a doubly linked list of free spans
1167 SpanList free_
[kMaxPages
];
1169 // Number of pages kept in free lists
1170 uintptr_t free_pages_
;
1172 // Bytes allocated from system
1173 uint64_t system_bytes_
;
1175 bool GrowHeap(Length n
);
1177 // REQUIRES span->length >= n
1178 // Remove span from its free list, and move any leftover part of
1179 // span into appropriate free lists. Also update "span" to have
1180 // length exactly "n" and mark it as non-free so it can be returned
1183 // "released" is true iff "span" was found on a "returned" list.
1184 void Carve(Span
* span
, Length n
, bool released
);
1186 void RecordSpan(Span
* span
) {
1187 pagemap_
.set(span
->start
, span
);
1188 if (span
->length
> 1) {
1189 pagemap_
.set(span
->start
+ span
->length
- 1, span
);
1193 // Allocate a large span of length == n. If successful, returns a
1194 // span of exactly the specified length. Else, returns NULL.
1195 Span
* AllocLarge(Length n
);
1197 // Incrementally release some memory to the system.
1198 // IncrementalScavenge(n) is called whenever n pages are freed.
1199 void IncrementalScavenge(Length n
);
1201 // Number of pages to deallocate before doing more scavenging
1202 int64_t scavenge_counter_
;
1204 // Index of last free list we scavenged
1205 size_t scavenge_index_
;
1207 #if defined(WTF_CHANGES) && PLATFORM(DARWIN)
1208 friend class FastMallocZone
;
1212 void TCMalloc_PageHeap::init()
1214 pagemap_
.init(MetaDataAlloc
);
1215 pagemap_cache_
= PageMapCache(0);
1218 scavenge_counter_
= 0;
1219 // Start scavenging at kMaxPages list
1220 scavenge_index_
= kMaxPages
-1;
1221 COMPILE_ASSERT(kNumClasses
<= (1 << PageMapCache::kValuebits
), valuebits
);
1222 DLL_Init(&large_
.normal
);
1223 DLL_Init(&large_
.returned
);
1224 for (size_t i
= 0; i
< kMaxPages
; i
++) {
1225 DLL_Init(&free_
[i
].normal
);
1226 DLL_Init(&free_
[i
].returned
);
1230 inline Span
* TCMalloc_PageHeap::New(Length n
) {
1234 // Find first size >= n that has a non-empty list
1235 for (Length s
= n
; s
< kMaxPages
; s
++) {
1237 bool released
= false;
1238 if (!DLL_IsEmpty(&free_
[s
].normal
)) {
1239 // Found normal span
1240 ll
= &free_
[s
].normal
;
1241 } else if (!DLL_IsEmpty(&free_
[s
].returned
)) {
1242 // Found returned span; reallocate it
1243 ll
= &free_
[s
].returned
;
1246 // Keep looking in larger classes
1250 Span
* result
= ll
->next
;
1251 Carve(result
, n
, released
);
1252 #if TCMALLOC_TRACK_DECOMMITED_SPANS
1253 if (result
->decommitted
) {
1254 TCMalloc_SystemCommit(reinterpret_cast<void*>(result
->start
<< kPageShift
), static_cast<size_t>(n
<< kPageShift
));
1255 result
->decommitted
= false;
1263 Span
* result
= AllocLarge(n
);
1264 if (result
!= NULL
) {
1265 ASSERT_SPAN_COMMITTED(result
);
1269 // Grow the heap and try again
1275 return AllocLarge(n
);
1278 Span
* TCMalloc_PageHeap::AllocLarge(Length n
) {
1279 // find the best span (closest to n in size).
1280 // The following loops implements address-ordered best-fit.
1281 bool from_released
= false;
1284 // Search through normal list
1285 for (Span
* span
= large_
.normal
.next
;
1286 span
!= &large_
.normal
;
1287 span
= span
->next
) {
1288 if (span
->length
>= n
) {
1290 || (span
->length
< best
->length
)
1291 || ((span
->length
== best
->length
) && (span
->start
< best
->start
))) {
1293 from_released
= false;
1298 // Search through released list in case it has a better fit
1299 for (Span
* span
= large_
.returned
.next
;
1300 span
!= &large_
.returned
;
1301 span
= span
->next
) {
1302 if (span
->length
>= n
) {
1304 || (span
->length
< best
->length
)
1305 || ((span
->length
== best
->length
) && (span
->start
< best
->start
))) {
1307 from_released
= true;
1313 Carve(best
, n
, from_released
);
1314 #if TCMALLOC_TRACK_DECOMMITED_SPANS
1315 if (best
->decommitted
) {
1316 TCMalloc_SystemCommit(reinterpret_cast<void*>(best
->start
<< kPageShift
), static_cast<size_t>(n
<< kPageShift
));
1317 best
->decommitted
= false;
1327 Span
* TCMalloc_PageHeap::Split(Span
* span
, Length n
) {
1329 ASSERT(n
< span
->length
);
1330 ASSERT(!span
->free
);
1331 ASSERT(span
->sizeclass
== 0);
1332 Event(span
, 'T', n
);
1334 const Length extra
= span
->length
- n
;
1335 Span
* leftover
= NewSpan(span
->start
+ n
, extra
);
1336 Event(leftover
, 'U', extra
);
1337 RecordSpan(leftover
);
1338 pagemap_
.set(span
->start
+ n
- 1, span
); // Update map from pageid to span
1344 #if !TCMALLOC_TRACK_DECOMMITED_SPANS
1345 static ALWAYS_INLINE
void propagateDecommittedState(Span
*, Span
*) { }
1347 static ALWAYS_INLINE
void propagateDecommittedState(Span
* destination
, Span
* source
)
1349 destination
->decommitted
= source
->decommitted
;
1353 inline void TCMalloc_PageHeap::Carve(Span
* span
, Length n
, bool released
) {
1357 Event(span
, 'A', n
);
1359 const int extra
= static_cast<int>(span
->length
- n
);
1362 Span
* leftover
= NewSpan(span
->start
+ n
, extra
);
1364 propagateDecommittedState(leftover
, span
);
1365 Event(leftover
, 'S', extra
);
1366 RecordSpan(leftover
);
1368 // Place leftover span on appropriate free list
1369 SpanList
* listpair
= (static_cast<size_t>(extra
) < kMaxPages
) ? &free_
[extra
] : &large_
;
1370 Span
* dst
= released
? &listpair
->returned
: &listpair
->normal
;
1371 DLL_Prepend(dst
, leftover
);
1374 pagemap_
.set(span
->start
+ n
- 1, span
);
1378 #if !TCMALLOC_TRACK_DECOMMITED_SPANS
1379 static ALWAYS_INLINE
void mergeDecommittedStates(Span
*, Span
*) { }
1381 static ALWAYS_INLINE
void mergeDecommittedStates(Span
* destination
, Span
* other
)
1383 if (other
->decommitted
)
1384 destination
->decommitted
= true;
1388 inline void TCMalloc_PageHeap::Delete(Span
* span
) {
1390 ASSERT(!span
->free
);
1391 ASSERT(span
->length
> 0);
1392 ASSERT(GetDescriptor(span
->start
) == span
);
1393 ASSERT(GetDescriptor(span
->start
+ span
->length
- 1) == span
);
1394 span
->sizeclass
= 0;
1395 #ifndef NO_TCMALLOC_SAMPLES
1399 // Coalesce -- we guarantee that "p" != 0, so no bounds checking
1400 // necessary. We do not bother resetting the stale pagemap
1401 // entries for the pieces we are merging together because we only
1402 // care about the pagemap entries for the boundaries.
1404 // Note that the spans we merge into "span" may come out of
1405 // a "returned" list. For simplicity, we move these into the
1406 // "normal" list of the appropriate size class.
1407 const PageID p
= span
->start
;
1408 const Length n
= span
->length
;
1409 Span
* prev
= GetDescriptor(p
-1);
1410 if (prev
!= NULL
&& prev
->free
) {
1411 // Merge preceding span into this span
1412 ASSERT(prev
->start
+ prev
->length
== p
);
1413 const Length len
= prev
->length
;
1414 mergeDecommittedStates(span
, prev
);
1418 span
->length
+= len
;
1419 pagemap_
.set(span
->start
, span
);
1420 Event(span
, 'L', len
);
1422 Span
* next
= GetDescriptor(p
+n
);
1423 if (next
!= NULL
&& next
->free
) {
1424 // Merge next span into this span
1425 ASSERT(next
->start
== p
+n
);
1426 const Length len
= next
->length
;
1427 mergeDecommittedStates(span
, next
);
1430 span
->length
+= len
;
1431 pagemap_
.set(span
->start
+ span
->length
- 1, span
);
1432 Event(span
, 'R', len
);
1435 Event(span
, 'D', span
->length
);
1437 if (span
->length
< kMaxPages
) {
1438 DLL_Prepend(&free_
[span
->length
].normal
, span
);
1440 DLL_Prepend(&large_
.normal
, span
);
1444 IncrementalScavenge(n
);
1448 void TCMalloc_PageHeap::IncrementalScavenge(Length n
) {
1449 // Fast path; not yet time to release memory
1450 scavenge_counter_
-= n
;
1451 if (scavenge_counter_
>= 0) return; // Not yet time to scavenge
1453 static const size_t kDefaultReleaseDelay
= 64;
1455 // Find index of free list to scavenge
1456 size_t index
= scavenge_index_
+ 1;
1457 for (size_t i
= 0; i
< kMaxPages
+1; i
++) {
1458 if (index
> kMaxPages
) index
= 0;
1459 SpanList
* slist
= (index
== kMaxPages
) ? &large_
: &free_
[index
];
1460 if (!DLL_IsEmpty(&slist
->normal
)) {
1461 // Release the last span on the normal portion of this list
1462 Span
* s
= slist
->normal
.prev
;
1464 TCMalloc_SystemRelease(reinterpret_cast<void*>(s
->start
<< kPageShift
),
1465 static_cast<size_t>(s
->length
<< kPageShift
));
1466 #if TCMALLOC_TRACK_DECOMMITED_SPANS
1467 s
->decommitted
= true;
1469 DLL_Prepend(&slist
->returned
, s
);
1471 scavenge_counter_
= std::max
<size_t>(16UL, std::min
<size_t>(kDefaultReleaseDelay
, kDefaultReleaseDelay
- (free_pages_
/ kDefaultReleaseDelay
)));
1473 if (index
== kMaxPages
&& !DLL_IsEmpty(&slist
->normal
))
1474 scavenge_index_
= index
- 1;
1476 scavenge_index_
= index
;
1482 // Nothing to scavenge, delay for a while
1483 scavenge_counter_
= kDefaultReleaseDelay
;
1486 void TCMalloc_PageHeap::RegisterSizeClass(Span
* span
, size_t sc
) {
1487 // Associate span object with all interior pages as well
1488 ASSERT(!span
->free
);
1489 ASSERT(GetDescriptor(span
->start
) == span
);
1490 ASSERT(GetDescriptor(span
->start
+span
->length
-1) == span
);
1491 Event(span
, 'C', sc
);
1492 span
->sizeclass
= static_cast<unsigned int>(sc
);
1493 for (Length i
= 1; i
< span
->length
-1; i
++) {
1494 pagemap_
.set(span
->start
+i
, span
);
1499 size_t TCMalloc_PageHeap::ReturnedBytes() const {
1501 for (unsigned s
= 0; s
< kMaxPages
; s
++) {
1502 const int r_length
= DLL_Length(&free_
[s
].returned
);
1503 unsigned r_pages
= s
* r_length
;
1504 result
+= r_pages
<< kPageShift
;
1507 for (Span
* s
= large_
.returned
.next
; s
!= &large_
.returned
; s
= s
->next
)
1508 result
+= s
->length
<< kPageShift
;
1514 static double PagesToMB(uint64_t pages
) {
1515 return (pages
<< kPageShift
) / 1048576.0;
1518 void TCMalloc_PageHeap::Dump(TCMalloc_Printer
* out
) {
1519 int nonempty_sizes
= 0;
1520 for (int s
= 0; s
< kMaxPages
; s
++) {
1521 if (!DLL_IsEmpty(&free_
[s
].normal
) || !DLL_IsEmpty(&free_
[s
].returned
)) {
1525 out
->printf("------------------------------------------------\n");
1526 out
->printf("PageHeap: %d sizes; %6.1f MB free\n",
1527 nonempty_sizes
, PagesToMB(free_pages_
));
1528 out
->printf("------------------------------------------------\n");
1529 uint64_t total_normal
= 0;
1530 uint64_t total_returned
= 0;
1531 for (int s
= 0; s
< kMaxPages
; s
++) {
1532 const int n_length
= DLL_Length(&free_
[s
].normal
);
1533 const int r_length
= DLL_Length(&free_
[s
].returned
);
1534 if (n_length
+ r_length
> 0) {
1535 uint64_t n_pages
= s
* n_length
;
1536 uint64_t r_pages
= s
* r_length
;
1537 total_normal
+= n_pages
;
1538 total_returned
+= r_pages
;
1539 out
->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
1540 "; unmapped: %6.1f MB; %6.1f MB cum\n",
1542 (n_length
+ r_length
),
1543 PagesToMB(n_pages
+ r_pages
),
1544 PagesToMB(total_normal
+ total_returned
),
1546 PagesToMB(total_returned
));
1550 uint64_t n_pages
= 0;
1551 uint64_t r_pages
= 0;
1554 out
->printf("Normal large spans:\n");
1555 for (Span
* s
= large_
.normal
.next
; s
!= &large_
.normal
; s
= s
->next
) {
1556 out
->printf(" [ %6" PRIuS
" pages ] %6.1f MB\n",
1557 s
->length
, PagesToMB(s
->length
));
1558 n_pages
+= s
->length
;
1561 out
->printf("Unmapped large spans:\n");
1562 for (Span
* s
= large_
.returned
.next
; s
!= &large_
.returned
; s
= s
->next
) {
1563 out
->printf(" [ %6" PRIuS
" pages ] %6.1f MB\n",
1564 s
->length
, PagesToMB(s
->length
));
1565 r_pages
+= s
->length
;
1568 total_normal
+= n_pages
;
1569 total_returned
+= r_pages
;
1570 out
->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
1571 "; unmapped: %6.1f MB; %6.1f MB cum\n",
1572 (n_spans
+ r_spans
),
1573 PagesToMB(n_pages
+ r_pages
),
1574 PagesToMB(total_normal
+ total_returned
),
1576 PagesToMB(total_returned
));
1580 bool TCMalloc_PageHeap::GrowHeap(Length n
) {
1581 ASSERT(kMaxPages
>= kMinSystemAlloc
);
1582 if (n
> kMaxValidPages
) return false;
1583 Length ask
= (n
>kMinSystemAlloc
) ? n
: static_cast<Length
>(kMinSystemAlloc
);
1585 void* ptr
= TCMalloc_SystemAlloc(ask
<< kPageShift
, &actual_size
, kPageSize
);
1588 // Try growing just "n" pages
1590 ptr
= TCMalloc_SystemAlloc(ask
<< kPageShift
, &actual_size
, kPageSize
);
1592 if (ptr
== NULL
) return false;
1594 ask
= actual_size
>> kPageShift
;
1596 uint64_t old_system_bytes
= system_bytes_
;
1597 system_bytes_
+= (ask
<< kPageShift
);
1598 const PageID p
= reinterpret_cast<uintptr_t>(ptr
) >> kPageShift
;
1601 // If we already have a lot of pages allocated, just preallocate a bunch of
1602 // memory for the page map. This prevents fragmentation by pagemap metadata
1603 // when a program keeps allocating and freeing large blocks.
1605 if (old_system_bytes
< kPageMapBigAllocationThreshold
1606 && system_bytes_
>= kPageMapBigAllocationThreshold
) {
1607 pagemap_
.PreallocateMoreMemory();
1610 // Make sure pagemap_ has entries for all of the new pages.
1611 // Plus ensure one before and one after so coalescing code
1612 // does not need bounds-checking.
1613 if (pagemap_
.Ensure(p
-1, ask
+2)) {
1614 // Pretend the new area is allocated and then Delete() it to
1615 // cause any necessary coalescing to occur.
1617 // We do not adjust free_pages_ here since Delete() will do it for us.
1618 Span
* span
= NewSpan(p
, ask
);
1624 // We could not allocate memory within "pagemap_"
1625 // TODO: Once we can return memory to the system, return the new span
1630 bool TCMalloc_PageHeap::Check() {
1631 ASSERT(free_
[0].normal
.next
== &free_
[0].normal
);
1632 ASSERT(free_
[0].returned
.next
== &free_
[0].returned
);
1633 CheckList(&large_
.normal
, kMaxPages
, 1000000000);
1634 CheckList(&large_
.returned
, kMaxPages
, 1000000000);
1635 for (Length s
= 1; s
< kMaxPages
; s
++) {
1636 CheckList(&free_
[s
].normal
, s
, s
);
1637 CheckList(&free_
[s
].returned
, s
, s
);
1643 bool TCMalloc_PageHeap::CheckList(Span
*, Length
, Length
) {
1647 bool TCMalloc_PageHeap::CheckList(Span
* list
, Length min_pages
, Length max_pages
) {
1648 for (Span
* s
= list
->next
; s
!= list
; s
= s
->next
) {
1649 CHECK_CONDITION(s
->free
);
1650 CHECK_CONDITION(s
->length
>= min_pages
);
1651 CHECK_CONDITION(s
->length
<= max_pages
);
1652 CHECK_CONDITION(GetDescriptor(s
->start
) == s
);
1653 CHECK_CONDITION(GetDescriptor(s
->start
+s
->length
-1) == s
);
1659 static void ReleaseFreeList(Span
* list
, Span
* returned
) {
1660 // Walk backwards through list so that when we push these
1661 // spans on the "returned" list, we preserve the order.
1662 while (!DLL_IsEmpty(list
)) {
1663 Span
* s
= list
->prev
;
1665 DLL_Prepend(returned
, s
);
1666 TCMalloc_SystemRelease(reinterpret_cast<void*>(s
->start
<< kPageShift
),
1667 static_cast<size_t>(s
->length
<< kPageShift
));
1671 void TCMalloc_PageHeap::ReleaseFreePages() {
1672 for (Length s
= 0; s
< kMaxPages
; s
++) {
1673 ReleaseFreeList(&free_
[s
].normal
, &free_
[s
].returned
);
1675 ReleaseFreeList(&large_
.normal
, &large_
.returned
);
1679 //-------------------------------------------------------------------
1681 //-------------------------------------------------------------------
1683 class TCMalloc_ThreadCache_FreeList
{
1685 void* list_
; // Linked list of nodes
1686 uint16_t length_
; // Current length
1687 uint16_t lowater_
; // Low water mark for list length
1696 // Return current length of list
1697 int length() const {
1702 bool empty() const {
1703 return list_
== NULL
;
1706 // Low-water mark management
1707 int lowwatermark() const { return lowater_
; }
1708 void clear_lowwatermark() { lowater_
= length_
; }
1710 ALWAYS_INLINE
void Push(void* ptr
) {
1711 SLL_Push(&list_
, ptr
);
1715 void PushRange(int N
, void *start
, void *end
) {
1716 SLL_PushRange(&list_
, start
, end
);
1717 length_
= length_
+ static_cast<uint16_t>(N
);
1720 void PopRange(int N
, void **start
, void **end
) {
1721 SLL_PopRange(&list_
, N
, start
, end
);
1722 ASSERT(length_
>= N
);
1723 length_
= length_
- static_cast<uint16_t>(N
);
1724 if (length_
< lowater_
) lowater_
= length_
;
1727 ALWAYS_INLINE
void* Pop() {
1728 ASSERT(list_
!= NULL
);
1730 if (length_
< lowater_
) lowater_
= length_
;
1731 return SLL_Pop(&list_
);
1735 template <class Finder
, class Reader
>
1736 void enumerateFreeObjects(Finder
& finder
, const Reader
& reader
)
1738 for (void* nextObject
= list_
; nextObject
; nextObject
= *reader(reinterpret_cast<void**>(nextObject
)))
1739 finder
.visit(nextObject
);
1744 //-------------------------------------------------------------------
1745 // Data kept per thread
1746 //-------------------------------------------------------------------
1748 class TCMalloc_ThreadCache
{
1750 typedef TCMalloc_ThreadCache_FreeList FreeList
;
1752 typedef DWORD ThreadIdentifier
;
1754 typedef pthread_t ThreadIdentifier
;
1757 size_t size_
; // Combined size of data
1758 ThreadIdentifier tid_
; // Which thread owns it
1759 bool in_setspecific_
; // Called pthread_setspecific?
1760 FreeList list_
[kNumClasses
]; // Array indexed by size-class
1762 // We sample allocations, biased by the size of the allocation
1763 uint32_t rnd_
; // Cheap random number generator
1764 size_t bytes_until_sample_
; // Bytes until we sample next
1766 // Allocate a new heap. REQUIRES: pageheap_lock is held.
1767 static inline TCMalloc_ThreadCache
* NewHeap(ThreadIdentifier tid
);
1769 // Use only as pthread thread-specific destructor function.
1770 static void DestroyThreadCache(void* ptr
);
1772 // All ThreadCache objects are kept in a linked list (for stats collection)
1773 TCMalloc_ThreadCache
* next_
;
1774 TCMalloc_ThreadCache
* prev_
;
1776 void Init(ThreadIdentifier tid
);
1779 // Accessors (mostly just for printing stats)
1780 int freelist_length(size_t cl
) const { return list_
[cl
].length(); }
1782 // Total byte size in cache
1783 size_t Size() const { return size_
; }
1785 void* Allocate(size_t size
);
1786 void Deallocate(void* ptr
, size_t size_class
);
1788 void FetchFromCentralCache(size_t cl
, size_t allocationSize
);
1789 void ReleaseToCentralCache(size_t cl
, int N
);
1793 // Record allocation of "k" bytes. Return true iff allocation
1794 // should be sampled
1795 bool SampleAllocation(size_t k
);
1797 // Pick next sampling point
1798 void PickNextSample(size_t k
);
1800 static void InitModule();
1801 static void InitTSD();
1802 static TCMalloc_ThreadCache
* GetThreadHeap();
1803 static TCMalloc_ThreadCache
* GetCache();
1804 static TCMalloc_ThreadCache
* GetCacheIfPresent();
1805 static TCMalloc_ThreadCache
* CreateCacheIfNecessary();
1806 static void DeleteCache(TCMalloc_ThreadCache
* heap
);
1807 static void BecomeIdle();
1808 static void RecomputeThreadCacheSize();
1811 template <class Finder
, class Reader
>
1812 void enumerateFreeObjects(Finder
& finder
, const Reader
& reader
)
1814 for (unsigned sizeClass
= 0; sizeClass
< kNumClasses
; sizeClass
++)
1815 list_
[sizeClass
].enumerateFreeObjects(finder
, reader
);
1820 //-------------------------------------------------------------------
1821 // Data kept per size-class in central cache
1822 //-------------------------------------------------------------------
1824 class TCMalloc_Central_FreeList
{
1826 void Init(size_t cl
);
1828 // These methods all do internal locking.
1830 // Insert the specified range into the central freelist. N is the number of
1831 // elements in the range.
1832 void InsertRange(void *start
, void *end
, int N
);
1834 // Returns the actual number of fetched elements in N.
1835 void RemoveRange(void **start
, void **end
, int *N
);
1837 // Returns the number of free objects in cache.
1839 SpinLockHolder
h(&lock_
);
1843 // Returns the number of free objects in the transfer cache.
1845 SpinLockHolder
h(&lock_
);
1846 return used_slots_
* num_objects_to_move
[size_class_
];
1850 template <class Finder
, class Reader
>
1851 void enumerateFreeObjects(Finder
& finder
, const Reader
& reader
, TCMalloc_Central_FreeList
* remoteCentralFreeList
)
1853 for (Span
* span
= &empty_
; span
&& span
!= &empty_
; span
= (span
->next
? reader(span
->next
) : 0))
1854 ASSERT(!span
->objects
);
1856 ASSERT(!nonempty_
.objects
);
1857 static const ptrdiff_t nonemptyOffset
= reinterpret_cast<const char*>(&nonempty_
) - reinterpret_cast<const char*>(this);
1859 Span
* remoteNonempty
= reinterpret_cast<Span
*>(reinterpret_cast<char*>(remoteCentralFreeList
) + nonemptyOffset
);
1860 Span
* remoteSpan
= nonempty_
.next
;
1862 for (Span
* span
= reader(remoteSpan
); span
&& remoteSpan
!= remoteNonempty
; remoteSpan
= span
->next
, span
= (span
->next
? reader(span
->next
) : 0)) {
1863 for (void* nextObject
= span
->objects
; nextObject
; nextObject
= *reader(reinterpret_cast<void**>(nextObject
)))
1864 finder
.visit(nextObject
);
1870 // REQUIRES: lock_ is held
1871 // Remove object from cache and return.
1872 // Return NULL if no free entries in cache.
1873 void* FetchFromSpans();
1875 // REQUIRES: lock_ is held
1876 // Remove object from cache and return. Fetches
1877 // from pageheap if cache is empty. Only returns
1878 // NULL on allocation failure.
1879 void* FetchFromSpansSafe();
1881 // REQUIRES: lock_ is held
1882 // Release a linked list of objects to spans.
1883 // May temporarily release lock_.
1884 void ReleaseListToSpans(void *start
);
1886 // REQUIRES: lock_ is held
1887 // Release an object to spans.
1888 // May temporarily release lock_.
1889 void ReleaseToSpans(void* object
);
1891 // REQUIRES: lock_ is held
1892 // Populate cache by fetching from the page heap.
1893 // May temporarily release lock_.
1896 // REQUIRES: lock is held.
1897 // Tries to make room for a TCEntry. If the cache is full it will try to
1898 // expand it at the cost of some other cache size. Return false if there is
1900 bool MakeCacheSpace();
1902 // REQUIRES: lock_ for locked_size_class is held.
1903 // Picks a "random" size class to steal TCEntry slot from. In reality it
1904 // just iterates over the sizeclasses but does so without taking a lock.
1905 // Returns true on success.
1906 // May temporarily lock a "random" size class.
1907 static bool EvictRandomSizeClass(size_t locked_size_class
, bool force
);
1909 // REQUIRES: lock_ is *not* held.
1910 // Tries to shrink the cache. If force is true it will release objects to
1911 // spans if that allows it to shrink the cache. Returns false if it failed to
1912 // shrink the cache. Decrements cache_size_ on success.
1913 // May temporarily take lock_. If it takes lock_, the locked_size_class
1914 // lock is released to keep the thread from holding two size class locks
1915 // concurrently, which could lead to a deadlock.
1916 bool ShrinkCache(int locked_size_class, bool force);
1918 // This lock protects all the data members. cached_entries and cache_size_
1919 // may be looked at without holding the lock.
1922 // We keep linked lists of empty and non-empty spans.
1923 size_t size_class_
; // My size class
1924 Span empty_
; // Dummy header for list of empty spans
1925 Span nonempty_
; // Dummy header for list of non-empty spans
1926 size_t counter_
; // Number of free objects in cache entry
1928 // Here we reserve space for TCEntry cache slots. Since one size class can
1929 // end up getting all the TCEntries quota in the system we just preallocate
1930 // sufficient number of entries here.
1931 TCEntry tc_slots_
[kNumTransferEntries
];
1933 // Number of currently used cached entries in tc_slots_. This variable is
1934 // updated under a lock but can be read without one.
1935 int32_t used_slots_
;
1936 // The current number of slots for this size class. This is an
1937 // adaptive value that is increased if there is lots of traffic
1938 // on a given size class.
1939 int32_t cache_size_
;
// Pad each CentralCache object to multiple of 64 bytes
class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
 private:
  char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
};

//-------------------------------------------------------------------
// Global variables
//-------------------------------------------------------------------

// Central cache -- a collection of free-lists, one per size-class.
// We have a separate lock per free-list to reduce contention.
static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
// Page-level allocator
static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)] __attribute__((aligned));
static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];
static bool phinited = false;

// Avoid extra level of indirection by making "pageheap" be just an alias
// of pageheap_memory.
typedef union {
    void* m_memory;
    TCMalloc_PageHeap* m_pageHeap;
} PageHeapUnion;

static inline TCMalloc_PageHeap* getPageHeap()
{
    PageHeapUnion u = { &pageheap_memory[0] };
    return u.m_pageHeap;
}

#define pageheap getPageHeap()

// If TLS is available, we also store a copy
// of the per-thread object in a __thread variable
// since __thread variables are faster to read
// than pthread_getspecific().  We still need
// pthread_setspecific() because __thread
// variables provide no way to run cleanup
// code when a thread is destroyed.
static __thread TCMalloc_ThreadCache *threadlocal_heap;

// Thread-specific key.  Initialization here is somewhat tricky
// because some Linux startup code invokes malloc() before it
// is in a good enough state to handle pthread_key_create().
// Therefore, we use TSD keys only after tsd_inited is set to true.
// Until then, we use a slow path to get the heap object.
static bool tsd_inited = false;
static pthread_key_t heap_key;
DWORD tlsIndex = TLS_OUT_OF_INDEXES;
static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
{
    // still do pthread_setspecific when using MSVC fast TLS to
    // benefit from the delete callback.
    pthread_setspecific(heap_key, heap);
#if COMPILER(MSVC)
    TlsSetValue(tlsIndex, heap);
#endif
}
// Allocator for thread heaps
static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;

// Linked list of heap objects.  Protected by pageheap_lock.
static TCMalloc_ThreadCache* thread_heaps = NULL;
static int thread_heap_count = 0;

// Overall thread cache size.  Protected by pageheap_lock.
static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;

// Global per-thread cache size.  Writes are protected by
// pageheap_lock.  Reads are done without any locking, which should be
// fine as long as size_t can be written atomically and we don't place
// invariants between this variable and other pieces of state.
static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
//-------------------------------------------------------------------
// Central cache implementation
//-------------------------------------------------------------------

void TCMalloc_Central_FreeList::Init(size_t cl) {
  lock_.Init();
  size_class_ = cl;
  DLL_Init(&empty_);
  DLL_Init(&nonempty_);
  counter_ = 0;

  cache_size_ = 1;
  used_slots_ = 0;
  ASSERT(cache_size_ <= kNumTransferEntries);
}
void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
  while (start) {
    void *next = SLL_Next(start);
    ReleaseToSpans(start);
    start = next;
  }
}
ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);
  ASSERT(span != NULL);
  ASSERT(span->refcount > 0);

  // If span is empty, move it to non-empty list
  if (span->objects == NULL) {
    DLL_Remove(span);
    DLL_Prepend(&nonempty_, span);
    Event(span, 'N', 0);
  }

  // The following check is expensive, so it is disabled by default
  if (false) {
    // Check that object does not occur in list
    int got = 0;
    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
      ASSERT(p != object);
      got++;
    }
    ASSERT(got + span->refcount ==
           (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
  }

  counter_++;
  span->refcount--;
  if (span->refcount == 0) {
    Event(span, '#', 0);
    counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
    DLL_Remove(span);

    // Release central list lock while operating on pageheap
    lock_.Unlock();
    {
      SpinLockHolder h(&pageheap_lock);
      pageheap->Delete(span);
    }
    lock_.Lock();
  } else {
    *(reinterpret_cast<void**>(object)) = span->objects;
    span->objects = object;
  }
}
ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
    size_t locked_size_class, bool force) {
  static int race_counter = 0;
  int t = race_counter++;  // Updated without a lock, but who cares.
  if (t >= static_cast<int>(kNumClasses)) {
    while (t >= static_cast<int>(kNumClasses)) {
      t -= kNumClasses;
    }
    race_counter = t;
  }
  ASSERT(t < static_cast<int>(kNumClasses));
  if (t == static_cast<int>(locked_size_class)) return false;
  return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
}
bool TCMalloc_Central_FreeList::MakeCacheSpace() {
  // Is there room in the cache?
  if (used_slots_ < cache_size_) return true;
  // Check if we can expand this cache?
  if (cache_size_ == kNumTransferEntries) return false;
  // Ok, we'll try to grab an entry from some other size class.
  if (EvictRandomSizeClass(size_class_, false) ||
      EvictRandomSizeClass(size_class_, true)) {
    // Succeeded in evicting, we're going to make our cache larger.
    cache_size_++;
    return true;
  }
  return false;
}
class LockInverter {
 private:
  SpinLock *held_, *temp_;
 public:
  inline explicit LockInverter(SpinLock* held, SpinLock *temp)
    : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
  inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
};
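// Illustrative sketch, not part of the allocator: how ShrinkCache (below) is
// expected to use LockInverter so a thread never holds two size-class locks
// at once. The identifiers mirror the surrounding code; treat this as a usage
// note rather than additional implementation.
//
//   // central_cache[locked_size_class].lock_ is held on entry.
//   {
//       LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
//       // Only this free list's lock_ is held inside this scope.
//   }
//   // The destructor re-acquires the caller's lock before returning.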
bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
  // Start with a quick check without taking a lock.
  if (cache_size_ == 0) return false;
  // We don't evict from a full cache unless we are 'forcing'.
  if (force == false && used_slots_ == cache_size_) return false;

  // Grab lock, but first release the other lock held by this thread.  We use
  // the lock inverter to ensure that we never hold two size class locks
  // concurrently.  That can create a deadlock because there is no well
  // defined nesting order.
  LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
  ASSERT(used_slots_ <= cache_size_);
  ASSERT(0 <= cache_size_);
  if (cache_size_ == 0) return false;
  if (used_slots_ == cache_size_) {
    if (force == false) return false;
    // ReleaseListToSpans releases the lock, so we have to make all the
    // updates to the central list before calling it.
    cache_size_--;
    used_slots_--;
    ReleaseListToSpans(tc_slots_[used_slots_].head);
    return true;
  }
  cache_size_--;
  return true;
}
void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
  SpinLockHolder h(&lock_);
  if (N == num_objects_to_move[size_class_] &&
      MakeCacheSpace()) {
    int slot = used_slots_++;
    ASSERT(slot < kNumTransferEntries);
    TCEntry *entry = &tc_slots_[slot];
    entry->head = start;
    entry->tail = end;
    return;
  }
  ReleaseListToSpans(start);
}
void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
  int num = *N;
  ASSERT(num > 0);

  SpinLockHolder h(&lock_);
  if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
    int slot = --used_slots_;
    TCEntry *entry = &tc_slots_[slot];
    *start = entry->head;
    *end = entry->tail;
    return;
  }

  // TODO: Prefetch multiple TCEntries?
  void *tail = FetchFromSpansSafe();
  if (!tail) {
    // We are completely out of memory.
    *start = *end = NULL;
    *N = 0;
    return;
  }

  SLL_SetNext(tail, NULL);
  void *head = tail;
  int count = 1;
  while (count < num) {
    void *t = FetchFromSpans();
    if (!t) break;
    SLL_Push(&head, t);
    count++;
  }
  *start = head;
  *end = tail;
  *N = count;
}
void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
  void *t = FetchFromSpans();
  if (!t) {
    Populate();
    t = FetchFromSpans();
  }
  return t;
}
void* TCMalloc_Central_FreeList::FetchFromSpans() {
  if (DLL_IsEmpty(&nonempty_)) return NULL;
  Span* span = nonempty_.next;

  ASSERT(span->objects != NULL);
  ASSERT_SPAN_COMMITTED(span);
  span->refcount++;
  void* result = span->objects;
  span->objects = *(reinterpret_cast<void**>(result));
  if (span->objects == NULL) {
    // Move to empty list
    DLL_Remove(span);
    DLL_Prepend(&empty_, span);
    Event(span, 'E', 0);
  }
  counter_--;
  return result;
}
// Fetch memory from the system and add to the central cache freelist.
ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
  // Release central list lock while operating on pageheap
  lock_.Unlock();
  const size_t npages = class_to_pages[size_class_];

  Span* span;
  {
    SpinLockHolder h(&pageheap_lock);
    span = pageheap->New(npages);
    if (span) pageheap->RegisterSizeClass(span, size_class_);
  }
  if (span == NULL) {
    MESSAGE("allocation failed: %d\n", errno);
    lock_.Lock();
    return;
  }
  ASSERT_SPAN_COMMITTED(span);
  ASSERT(span->length == npages);
  // Cache sizeclass info eagerly.  Locking is not necessary.
  // (Instead of being eager, we could just replace any stale info
  // about this span, but that seems to be no better in practice.)
  for (size_t i = 0; i < npages; i++) {
    pageheap->CacheSizeClass(span->start + i, size_class_);
  }

  // Split the block into pieces and add to the free-list
  // TODO: coloring of objects to avoid cache conflicts?
  void** tail = &span->objects;
  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
  char* limit = ptr + (npages << kPageShift);
  const size_t size = ByteSizeForClass(size_class_);
  int num = 0;
  char* nptr;
  while ((nptr = ptr + size) <= limit) {
    *tail = ptr;
    tail = reinterpret_cast<void**>(ptr);
    ptr = nptr;
    num++;
  }
  ASSERT(ptr <= limit);
  *tail = NULL;
  span->refcount = 0; // No sub-object in use yet

  // Add span to list of non-empty spans
  lock_.Lock();
  DLL_Prepend(&nonempty_, span);
  counter_ += num;
}
//-------------------------------------------------------------------
// TCMalloc_ThreadCache implementation
//-------------------------------------------------------------------

inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
  if (bytes_until_sample_ < k) {
    PickNextSample(k);
    return true;
  } else {
    bytes_until_sample_ -= k;
    return false;
  }
}
void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
  size_ = 0;
  next_ = NULL;
  prev_ = NULL;
  tid_  = tid;
  in_setspecific_ = false;
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    list_[cl].Init();
  }

  // Initialize RNG -- run it for a bit to get to good values
  bytes_until_sample_ = 0;
  rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
  for (int i = 0; i < 100; i++) {
    PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
  }
}
void TCMalloc_ThreadCache::Cleanup() {
  // Put unused memory back into central cache
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    if (list_[cl].length() > 0) {
      ReleaseToCentralCache(cl, list_[cl].length());
    }
  }
}
ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
  ASSERT(size <= kMaxSize);
  const size_t cl = SizeClass(size);
  FreeList* list = &list_[cl];
  size_t allocationSize = ByteSizeForClass(cl);
  if (list->empty()) {
    FetchFromCentralCache(cl, allocationSize);
    if (list->empty()) return NULL;
  }
  size_ -= allocationSize;
  return list->Pop();
}
inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
  size_ += ByteSizeForClass(cl);
  FreeList* list = &list_[cl];
  list->Push(ptr);
  // If enough data is free, put back into central cache
  if (list->length() > kMaxFreeListLength) {
    ReleaseToCentralCache(cl, num_objects_to_move[cl]);
  }
  if (size_ >= per_thread_cache_size) Scavenge();
}
// Remove some objects of class "cl" from central cache and add to thread heap
ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
  int fetch_count = num_objects_to_move[cl];
  void *start, *end;
  central_cache[cl].RemoveRange(&start, &end, &fetch_count);
  list_[cl].PushRange(fetch_count, start, end);
  size_ += allocationSize * fetch_count;
}
// Remove some objects of class "cl" from thread heap and add to central cache
inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
  FreeList* src = &list_[cl];
  if (N > src->length()) N = src->length();
  size_ -= N*ByteSizeForClass(cl);

  // We return prepackaged chains of the correct size to the central cache.
  // TODO: Use the same format internally in the thread caches?
  int batch_size = num_objects_to_move[cl];
  while (N > batch_size) {
    void *tail, *head;
    src->PopRange(batch_size, &head, &tail);
    central_cache[cl].InsertRange(head, tail, batch_size);
    N -= batch_size;
  }
  void *tail, *head;
  src->PopRange(N, &head, &tail);
  central_cache[cl].InsertRange(head, tail, N);
}
// Release idle memory to the central cache
inline void TCMalloc_ThreadCache::Scavenge() {
  // If the low-water mark for the free list is L, it means we would
  // not have had to allocate anything from the central cache even if
  // we had reduced the free list size by L.  We aim to get closer to
  // that situation by dropping L/2 nodes from the free list.  This
  // may not release much memory, but if so we will call scavenge again
  // pretty soon and the low-water marks will be high on that call.
  //int64 start = CycleClock::Now();

  for (size_t cl = 0; cl < kNumClasses; cl++) {
    FreeList* list = &list_[cl];
    const int lowmark = list->lowwatermark();
    if (lowmark > 0) {
      const int drop = (lowmark > 1) ? lowmark/2 : 1;
      ReleaseToCentralCache(cl, drop);
    }
    list->clear_lowwatermark();
  }

  //int64 finish = CycleClock::Now();
  //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
}
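// Worked example of the policy above (illustrative only): if a free list holds
// 20 objects and its low-water mark since the last scavenge is L = 8, the list
// never dropped below 8 entries, so this pass returns L/2 = 4 objects to the
// central cache and clears the mark. A list whose low-water mark is 0 was
// fully drained at some point and gives nothing back on this pass.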
void TCMalloc_ThreadCache::PickNextSample(size_t k) {
  // Make next "random" number
  // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
  static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
  uint32_t r = rnd_;
  rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);

  // Next point is "rnd_ % (sample_period)".  I.e., average
  // increment is "sample_period/2".
  const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
  static int last_flag_value = -1;

  if (flag_value != last_flag_value) {
    SpinLockHolder h(&sample_period_lock);
    int i;
    for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
      if (primes_list[i] >= flag_value) {
        break;
      }
    }
    sample_period = primes_list[i];
    last_flag_value = flag_value;
  }

  bytes_until_sample_ += rnd_ % sample_period;

  if (k > (static_cast<size_t>(-1) >> 2)) {
    // If the user has asked for a huge allocation then it is possible
    // for the code below to loop infinitely.  Just return (note that
    // this throws off the sampling accuracy somewhat, but a user who
    // is allocating more than 1G of memory at a time can live with a
    // minor inaccuracy in profiling of small allocations, and also
    // would rather not wait for the loop below to terminate).
    return;
  }

  while (bytes_until_sample_ < k) {
    // Increase bytes_until_sample_ by enough average sampling periods
    // (sample_period >> 1) to allow us to sample past the current
    // allocation.
    bytes_until_sample_ += (sample_period >> 1);
  }

  bytes_until_sample_ -= k;
}
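// Illustrative sketch (not part of the allocator): a standalone version of the
// LFSR step used for rnd_ above. When the top bit of r is set, the arithmetic
// shift in static_cast<int32_t>(r) >> 31 produces all ones, so the polynomial
// taps in kPoly are XORed into the shifted value; otherwise the update is a
// plain left shift.
//
//   uint32_t lfsr_step(uint32_t r) {
//       const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
//       return (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
//   }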
void TCMalloc_ThreadCache::InitModule() {
  // There is a slight potential race here because of double-checked
  // locking idiom.  However, as long as the program does a small
  // allocation before switching to multi-threaded mode, we will be
  // fine.  We increase the chances of doing such a small allocation
  // by doing one in the constructor of the module_enter_exit_hook
  // object declared below.
  SpinLockHolder h(&pageheap_lock);
  if (!phinited) {
    InitSizeClasses();
    threadheap_allocator.Init();
    span_allocator.Init();
    span_allocator.New(); // Reduce cache conflicts
    span_allocator.New(); // Reduce cache conflicts
    stacktrace_allocator.Init();
    DLL_Init(&sampled_objects);
    for (size_t i = 0; i < kNumClasses; ++i) {
      central_cache[i].Init(i);
    }
    pageheap->init();
    phinited = 1;
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
    FastMallocZone::init();
#endif
  }
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
  // Create the heap and add it to the linked list
  TCMalloc_ThreadCache *heap = threadheap_allocator.New();
  heap->Init(tid);
  heap->next_ = thread_heaps;
  heap->prev_ = NULL;
  if (thread_heaps != NULL) thread_heaps->prev_ = heap;
  thread_heaps = heap;
  thread_heap_count++;
  RecomputeThreadCacheSize();
  return heap;
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
#ifdef HAVE_TLS
  // __thread is faster, but only when the kernel supports it
  if (KernelSupportsTLS())
    return threadlocal_heap;
#elif COMPILER(MSVC)
  return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
#else
  return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
#endif
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
  TCMalloc_ThreadCache* ptr = NULL;
  if (!tsd_inited) {
    InitModule();
  } else {
    ptr = GetThreadHeap();
  }
  if (ptr == NULL) ptr = CreateCacheIfNecessary();
  return ptr;
}
// In deletion paths, we do not try to create a thread-cache.  This is
// because we may be in the thread destruction code and may have
// already cleaned up the cache for this thread.
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
  if (!tsd_inited) return NULL;
  void* const p = GetThreadHeap();
  return reinterpret_cast<TCMalloc_ThreadCache*>(p);
}
void TCMalloc_ThreadCache::InitTSD() {
  ASSERT(!tsd_inited);
  pthread_key_create(&heap_key, DestroyThreadCache);
#if COMPILER(MSVC)
  tlsIndex = TlsAlloc();
#endif
  tsd_inited = true;

#if !COMPILER(MSVC)
  // We may have used a fake pthread_t for the main thread.  Fix it.
  pthread_t zero;
  memset(&zero, 0, sizeof(zero));
#endif
#ifndef WTF_CHANGES
  SpinLockHolder h(&pageheap_lock);
#else
  ASSERT(pageheap_lock.IsHeld());
#endif
  for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
    if (h->tid_ == 0) {
      h->tid_ = GetCurrentThreadId();
    }
#else
    if (pthread_equal(h->tid_, zero)) {
      h->tid_ = pthread_self();
    }
#endif
  }
}
TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
  // Initialize per-thread data if necessary
  TCMalloc_ThreadCache* heap = NULL;
  {
    SpinLockHolder h(&pageheap_lock);

#if COMPILER(MSVC)
    DWORD me;
    if (!tsd_inited) {
      me = 0;
    } else {
      me = GetCurrentThreadId();
    }
#else
    // Early on in glibc's life, we cannot even call pthread_self()
    pthread_t me;
    if (!tsd_inited) {
      memset(&me, 0, sizeof(me));
    } else {
      me = pthread_self();
    }
#endif

    // This may be a recursive malloc call from pthread_setspecific()
    // In that case, the heap for this thread has already been created
    // and added to the linked list.  So we search for that first.
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
      if (h->tid_ == me) {
#else
      if (pthread_equal(h->tid_, me)) {
#endif
        heap = h;
        break;
      }
    }

    if (heap == NULL) heap = NewHeap(me);
  }

  // We call pthread_setspecific() outside the lock because it may
  // call malloc() recursively.  The recursive call will never get
  // here again because it will find the already allocated heap in the
  // linked list of heaps.
  if (!heap->in_setspecific_ && tsd_inited) {
    heap->in_setspecific_ = true;
    setThreadHeap(heap);
    heap->in_setspecific_ = false;
  }
  return heap;
}
void TCMalloc_ThreadCache::BecomeIdle() {
  if (!tsd_inited) return;              // No caches yet
  TCMalloc_ThreadCache* heap = GetThreadHeap();
  if (heap == NULL) return;             // No thread cache to remove
  if (heap->in_setspecific_) return;    // Do not disturb the active caller

  heap->in_setspecific_ = true;
  pthread_setspecific(heap_key, NULL);
#ifdef HAVE_TLS
  // Also update the copy in __thread
  threadlocal_heap = NULL;
#endif
  heap->in_setspecific_ = false;
  if (GetThreadHeap() == heap) {
    // Somehow heap got reinstated by a recursive call to malloc
    // from pthread_setspecific.  We give up in this case.
    return;
  }

  // We can now get rid of the heap
  DeleteCache(heap);
}
void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
  // Note that "ptr" cannot be NULL since pthread promises not
  // to invoke the destructor on NULL values, but for safety,
  // we check anyway.
  if (ptr == NULL) return;
#ifdef HAVE_TLS
  // Prevent fast path of GetThreadHeap() from returning heap.
  threadlocal_heap = NULL;
#endif
  DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
}
void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
  // Remove all memory from heap
  heap->Cleanup();

  // Remove from linked list
  {
    SpinLockHolder h(&pageheap_lock);
    if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
    if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
    if (thread_heaps == heap) thread_heaps = heap->next_;
    thread_heap_count--;
    RecomputeThreadCacheSize();
  }

  threadheap_allocator.Delete(heap);
}
void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
  // Divide available space across threads
  int n = thread_heap_count > 0 ? thread_heap_count : 1;
  size_t space = overall_thread_cache_size / n;

  // Limit to allowed range
  if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
  if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;

  per_thread_cache_size = space;
}
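// Worked example (illustrative only): with 16 live thread heaps, each thread
// cache is allowed overall_thread_cache_size / 16 bytes, clamped into the
// range [kMinThreadCacheSize, kMaxThreadCacheSize]. With a single thread the
// entire overall budget goes to that thread, again subject to the same clamp.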
void TCMalloc_ThreadCache::Print() const {
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    MESSAGE("      %5" PRIuS " : %4d len; %4d lo\n",
            ByteSizeForClass(cl),
            list_[cl].length(),
            list_[cl].lowwatermark());
  }
}
// Extract interesting stats
struct TCMallocStats {
  uint64_t system_bytes;        // Bytes alloced from system
  uint64_t thread_bytes;        // Bytes in thread caches
  uint64_t central_bytes;       // Bytes in central cache
  uint64_t transfer_bytes;      // Bytes in central transfer cache
  uint64_t pageheap_bytes;      // Bytes in page heap
  uint64_t metadata_bytes;      // Bytes alloced for metadata
};
// Get stats into "r".  Also get per-size-class counts if class_count != NULL
static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
  r->central_bytes = 0;
  r->transfer_bytes = 0;
  for (int cl = 0; cl < kNumClasses; ++cl) {
    const int length = central_cache[cl].length();
    const int tc_length = central_cache[cl].tc_length();
    r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
    r->transfer_bytes +=
      static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
    if (class_count) class_count[cl] = length + tc_length;
  }

  // Add stats from per-thread heaps
  r->thread_bytes = 0;
  {
    SpinLockHolder h(&pageheap_lock);
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
      r->thread_bytes += h->Size();
      if (class_count) {
        for (size_t cl = 0; cl < kNumClasses; ++cl) {
          class_count[cl] += h->freelist_length(cl);
        }
      }
    }
  }

  {
    SpinLockHolder h(&pageheap_lock);
    r->system_bytes = pageheap->SystemBytes();
    r->metadata_bytes = metadata_system_bytes;
    r->pageheap_bytes = pageheap->FreeBytes();
  }
}
// WRITE stats to "out"
static void DumpStats(TCMalloc_Printer* out, int level) {
  TCMallocStats stats;
  uint64_t class_count[kNumClasses];
  ExtractStats(&stats, (level >= 2 ? class_count : NULL));

  if (level >= 2) {
    out->printf("------------------------------------------------\n");
    uint64_t cumulative = 0;
    for (int cl = 0; cl < kNumClasses; ++cl) {
      if (class_count[cl] > 0) {
        uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
        cumulative += class_bytes;
        out->printf("class %3d [ %8" PRIuS " bytes ] : "
                    "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
                    cl, ByteSizeForClass(cl),
                    class_count[cl],
                    class_bytes / 1048576.0,
                    cumulative / 1048576.0);
      }
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->Dump(out);
  }

  const uint64_t bytes_in_use = stats.system_bytes
                                - stats.pageheap_bytes
                                - stats.central_bytes
                                - stats.transfer_bytes
                                - stats.thread_bytes;

  out->printf("------------------------------------------------\n"
              "MALLOC: %12" PRIu64 " Heap size\n"
              "MALLOC: %12" PRIu64 " Bytes in use by application\n"
              "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
              "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
              "MALLOC: %12" PRIu64 " Spans in use\n"
              "MALLOC: %12" PRIu64 " Thread heaps in use\n"
              "MALLOC: %12" PRIu64 " Metadata allocated\n"
              "------------------------------------------------\n",
              stats.system_bytes,
              bytes_in_use,
              stats.pageheap_bytes,
              stats.central_bytes,
              stats.transfer_bytes,
              stats.thread_bytes,
              uint64_t(span_allocator.inuse()),
              uint64_t(threadheap_allocator.inuse()),
              stats.metadata_bytes);
}
static void PrintStats(int level) {
  const int kBufferSize = 16 << 10;
  char* buffer = new char[kBufferSize];
  TCMalloc_Printer printer(buffer, kBufferSize);
  DumpStats(&printer, level);
  write(STDERR_FILENO, buffer, strlen(buffer));
  delete[] buffer;
}
static void** DumpStackTraces() {
  // Count how much space we need
  int needed_slots = 0;
  {
    SpinLockHolder h(&pageheap_lock);
    for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
      StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
      needed_slots += 3 + stack->depth;
    }
    needed_slots += 100;            // Slop in case sample grows
    needed_slots += needed_slots/8; // An extra 12.5% slop
  }

  void** result = new void*[needed_slots];
  if (result == NULL) {
    MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
            needed_slots);
    return NULL;
  }

  SpinLockHolder h(&pageheap_lock);
  int used_slots = 0;
  for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
    ASSERT(used_slots < needed_slots);  // Need to leave room for terminator
    StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
    if (used_slots + 3 + stack->depth >= needed_slots) {
      // No more room
      break;
    }

    result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
    result[used_slots+1] = reinterpret_cast<void*>(stack->size);
    result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
    for (int d = 0; d < stack->depth; d++) {
      result[used_slots+3+d] = stack->stack[d];
    }
    used_slots += 3 + stack->depth;
  }
  result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
  return result;
}
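// Layout note for the array produced above: each sample is flattened as
// { 1, object size, stack depth, pc[0] ... pc[depth-1] }, samples are packed
// back to back, and a single 0 entry terminates the whole array.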
// TCMalloc's support for extra malloc interfaces
class TCMallocImplementation : public MallocExtension {
 public:
  virtual void GetStats(char* buffer, int buffer_length) {
    ASSERT(buffer_length > 0);
    TCMalloc_Printer printer(buffer, buffer_length);

    // Print level one stats unless lots of space is available
    if (buffer_length < 10000) {
      DumpStats(&printer, 1);
    } else {
      DumpStats(&printer, 2);
    }
  }

  virtual void** ReadStackTraces() {
    return DumpStackTraces();
  }

  virtual bool GetNumericProperty(const char* name, size_t* value) {
    ASSERT(name != NULL);

    if (strcmp(name, "generic.current_allocated_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes
               - stats.thread_bytes
               - stats.central_bytes
               - stats.pageheap_bytes;
      return true;
    }

    if (strcmp(name, "generic.heap_size") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
      // We assume that bytes in the page heap are not fragmented too
      // badly, and are therefore available for allocation.
      SpinLockHolder l(&pageheap_lock);
      *value = pageheap->FreeBytes();
      return true;
    }

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(&pageheap_lock);
      *value = overall_thread_cache_size;
      return true;
    }

    if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.thread_bytes;
      return true;
    }

    return false;
  }
  virtual bool SetNumericProperty(const char* name, size_t value) {
    ASSERT(name != NULL);

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      // Clip the value to a reasonable range
      if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
      if (value > (1<<30)) value = (1<<30);     // Limit to 1GB

      SpinLockHolder l(&pageheap_lock);
      overall_thread_cache_size = static_cast<size_t>(value);
      TCMalloc_ThreadCache::RecomputeThreadCacheSize();
      return true;
    }

    return false;
  }

  virtual void MarkThreadIdle() {
    TCMalloc_ThreadCache::BecomeIdle();
  }

  virtual void ReleaseFreeMemory() {
    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
  }
};
// The constructor allocates an object to ensure that initialization
// runs before main(), and therefore we do not have a chance to become
// multi-threaded before initialization.  We also create the TSD key
// here.  Presumably by the time this constructor runs, glibc is in
// good enough shape to handle pthread_key_create().
//
// The constructor also takes the opportunity to tell STL to use
// tcmalloc.  We want to do this early, before construct time, so
// all user STL allocations go through tcmalloc (which works really
// well for STL).
//
// The destructor prints stats when the program exits.
class TCMallocGuard {
 public:
  TCMallocGuard() {
#ifdef HAVE_TLS    // this is true if the cc/ld/libc combo support TLS
    // Check whether the kernel also supports TLS (needs to happen at runtime)
    CheckIfKernelSupportsTLS();
#endif
#ifdef WIN32                    // patch the windows VirtualAlloc, etc.
    PatchWindowsFunctions();    // defined in windows/patch_functions.cc
#endif
    TCMalloc_ThreadCache::InitTSD();
    MallocExtension::Register(new TCMallocImplementation);
  }

  ~TCMallocGuard() {
    const char* env = getenv("MALLOCSTATS");
    if (env != NULL) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);
    }
#ifdef WIN32
    UnpatchWindowsFunctions();
#endif
  }
};
static TCMallocGuard module_enter_exit_hook;
//-------------------------------------------------------------------
// Helpers for the exported routines below
//-------------------------------------------------------------------

static Span* DoSampledAllocation(size_t size) {
  // Grab the stack trace outside the heap lock
  StackTrace tmp;
  tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
  tmp.size = size;

  SpinLockHolder h(&pageheap_lock);
  // Allocate span
  Span *span = pageheap->New(pages(size == 0 ? 1 : size));
  if (span == NULL) {
    return NULL;
  }

  // Allocate stack trace
  StackTrace *stack = stacktrace_allocator.New();
  if (stack == NULL) {
    // Sampling failed because of lack of memory
    return span;
  }

  *stack = tmp;
  span->sample = 1;
  span->objects = stack;
  DLL_Prepend(&sampled_objects, span);

  return span;
}
static inline bool CheckCachedSizeClass(void *ptr) {
  PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cached_value = pageheap->GetSizeClassIfCached(p);
  return cached_value == 0 ||
      cached_value == pageheap->GetDescriptor(p)->sizeclass;
}

static inline void* CheckedMallocResult(void *result)
{
  ASSERT(result == 0 || CheckCachedSizeClass(result));
  return result;
}

static inline void* SpanToMallocResult(Span *span) {
  ASSERT_SPAN_COMMITTED(span);
  pageheap->CacheSizeClass(span->start, 0);
  return
      CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
}
template <bool crashOnFailure>
static ALWAYS_INLINE void* do_malloc(size_t size) {
  void* ret = NULL;

  ASSERT(!isForbidden());

  // The following call forces module initialization
  TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
  if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
    Span* span = DoSampledAllocation(size);
    if (span)
      ret = SpanToMallocResult(span);
  } else if (size > kMaxSize) {
    // Use page-level allocator
    SpinLockHolder h(&pageheap_lock);
    Span* span = pageheap->New(pages(size));
    if (span)
      ret = SpanToMallocResult(span);
  } else {
    // The common case, and also the simplest.  This just pops the
    // size-appropriate freelist, after replenishing it if it's empty.
    ret = CheckedMallocResult(heap->Allocate(size));
  }

  if (!ret) {
    if (crashOnFailure) // This branch should be optimized out by the compiler.
      CRASH();
  }
  return ret;
}
static ALWAYS_INLINE void do_free(void* ptr) {
  if (ptr == NULL) return;
  ASSERT(pageheap != NULL);  // Should not call free() before malloc()
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  Span* span = NULL;
  size_t cl = pageheap->GetSizeClassIfCached(p);

  if (cl == 0) {
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
  }
  if (cl != 0) {
#ifndef NO_TCMALLOC_SAMPLES
    ASSERT(!pageheap->GetDescriptor(p)->sample);
#endif
    TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
    if (heap != NULL) {
      heap->Deallocate(ptr, cl);
    } else {
      // Delete directly into central cache
      SLL_SetNext(ptr, NULL);
      central_cache[cl].InsertRange(ptr, ptr, 1);
    }
  } else {
    SpinLockHolder h(&pageheap_lock);
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span != NULL && span->start == p);
#ifndef NO_TCMALLOC_SAMPLES
    if (span->sample) {
      DLL_Remove(span);
      stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
      span->objects = NULL;
    }
#endif
    pageheap->Delete(span);
  }
}
// For use by exported routines below that want specific alignments
//
// Note: this code can be slow, and can significantly fragment memory.
// The expectation is that memalign/posix_memalign/valloc/pvalloc will
// not be invoked very often.  This requirement simplifies our
// implementation and allows us to tune for expected allocation
// patterns.
static void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();

  // Allocate at least one byte to avoid boundary conditions below
  if (size == 0) size = 1;

  if (size <= kMaxSize && align < kPageSize) {
    // Search through acceptable size classes looking for one with
    // enough alignment.  This depends on the fact that
    // InitSizeClasses() currently produces several size classes that
    // are aligned at powers of two.  We will waste time and space if
    // we miss in the size class array, but that is deemed acceptable
    // since memalign() should be used rarely.
    size_t cl = SizeClass(size);
    while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
      cl++;
    }
    if (cl < kNumClasses) {
      TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
      return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
    }
  }

  // We will allocate directly from the page heap
  SpinLockHolder h(&pageheap_lock);

  if (align <= kPageSize) {
    // Any page-level allocation will be fine
    // TODO: We could put the rest of this page in the appropriate
    // TODO: cache but it does not seem worth it.
    Span* span = pageheap->New(pages(size));
    return span == NULL ? NULL : SpanToMallocResult(span);
  }

  // Allocate extra pages and carve off an aligned portion
  const Length alloc = pages(size + align);
  Span* span = pageheap->New(alloc);
  if (span == NULL) return NULL;

  // Skip starting portion so that we end up aligned
  Length skip = 0;
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
    skip++;
  }
  ASSERT(skip < alloc);
  if (skip > 0) {
    Span* rest = pageheap->Split(span, skip);
    pageheap->Delete(span);
    span = rest;
  }

  // Skip trailing portion that we do not need to return
  const Length needed = pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    Span* trailer = pageheap->Split(span, needed);
    pageheap->Delete(trailer);
  }
  return SpanToMallocResult(span);
}
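// Worked example of the large-alignment path above (illustrative only):
// requesting one page of data with align equal to four pages allocates
// pages(size + align) = 5 pages, advances `skip` until (span->start + skip)
// << kPageShift is a multiple of the alignment (at most 3 pages here),
// returns the skipped prefix and the unneeded trailer to the page heap, and
// hands back the single aligned page left in the middle.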
// Helpers for use by exported routines below:

static inline void do_malloc_stats() {
  PrintStats(1);
}

static inline int do_mallopt(int, int) {
  return 1;     // Indicates error
}
#ifdef HAVE_STRUCT_MALLINFO  // mallinfo isn't defined on freebsd, for instance
static inline struct mallinfo do_mallinfo() {
  TCMallocStats stats;
  ExtractStats(&stats, NULL);

  // Just some of the fields are filled in.
  struct mallinfo info;
  memset(&info, 0, sizeof(info));

  // Unfortunately, the struct contains "int" field, so some of the
  // size values will be truncated.
  info.arena     = static_cast<int>(stats.system_bytes);
  info.fsmblks   = static_cast<int>(stats.thread_bytes
                                    + stats.central_bytes
                                    + stats.transfer_bytes);
  info.fordblks  = static_cast<int>(stats.pageheap_bytes);
  info.uordblks  = static_cast<int>(stats.system_bytes
                                    - stats.thread_bytes
                                    - stats.central_bytes
                                    - stats.transfer_bytes
                                    - stats.pageheap_bytes);

  return info;
}
#endif
//-------------------------------------------------------------------
// Exported routines
//-------------------------------------------------------------------

// CAVEAT: The code structure below ensures that MallocHook methods are always
//         called from the stack frame of the invoked allocation function.
//         heap-checker.cc depends on this to start a stack trace from
//         the call to the (de)allocation function.

#define do_malloc do_malloc<crashOnFailure>

template <bool crashOnFailure>
void* malloc(size_t);

void* fastMalloc(size_t size)
{
    return malloc<true>(size);
}

void* tryFastMalloc(size_t size)
{
    return malloc<false>(size);
}
template <bool crashOnFailure>
void* malloc(size_t size) {
  void* result = do_malloc(size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

void free(void* ptr) {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}
template <bool crashOnFailure>
void* calloc(size_t, size_t);

void* fastCalloc(size_t n, size_t elem_size)
{
    return calloc<true>(n, elem_size);
}

void* tryFastCalloc(size_t n, size_t elem_size)
{
    return calloc<false>(n, elem_size);
}

template <bool crashOnFailure>
void* calloc(size_t n, size_t elem_size) {
  const size_t totalBytes = n * elem_size;

  // Protect against overflow
  if (n > 1 && elem_size && (totalBytes / elem_size) != n)
    return 0;

  void* result = do_malloc(totalBytes);
  if (result != NULL) {
    memset(result, 0, totalBytes);
  }
  MallocHook::InvokeNewHook(result, totalBytes);
  return result;
}
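// Worked example of the overflow guard above (illustrative only): with a
// 32-bit size_t, n = 0x40000000 and elem_size = 8 wraps totalBytes around to
// 0; totalBytes / elem_size then differs from n, so the overflow is detected
// and calloc fails instead of returning a too-small zeroed block.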
// Since cfree isn't used anywhere, we don't compile it in.
void cfree(void* ptr) {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}
template <bool crashOnFailure>
void* realloc(void*, size_t);

void* fastRealloc(void* old_ptr, size_t new_size)
{
    return realloc<true>(old_ptr, new_size);
}

void* tryFastRealloc(void* old_ptr, size_t new_size)
{
    return realloc<false>(old_ptr, new_size);
}

template <bool crashOnFailure>
void* realloc(void* old_ptr, size_t new_size) {
  if (old_ptr == NULL) {
    void* result = do_malloc(new_size);
    MallocHook::InvokeNewHook(result, new_size);
    return result;
  }
  if (new_size == 0) {
    MallocHook::InvokeDeleteHook(old_ptr);
    free(old_ptr);
    return NULL;
  }

  // Get the size of the old entry
  const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
  size_t cl = pageheap->GetSizeClassIfCached(p);
  Span *span = NULL;
  size_t old_size;
  if (cl == 0) {
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
  }
  if (cl != 0) {
    old_size = ByteSizeForClass(cl);
  } else {
    ASSERT(span != NULL);
    old_size = span->length << kPageShift;
  }

  // Reallocate if the new size is larger than the old size,
  // or if the new size is significantly smaller than the old size.
  if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
    // Need to reallocate
    void* new_ptr = do_malloc(new_size);
    if (new_ptr == NULL) {
      return NULL;
    }
    MallocHook::InvokeNewHook(new_ptr, new_size);
    memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
    MallocHook::InvokeDeleteHook(old_ptr);
    // We could use a variant of do_free() that leverages the fact
    // that we already know the sizeclass of old_ptr.  The benefit
    // would be small, so don't bother.
    do_free(old_ptr);
    return new_ptr;
  } else {
    return old_ptr;
  }
}
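// Note on the shrink threshold above (illustrative only): a smaller request
// only forces a copy when AllocationSize(new_size) is strictly less than the
// old allocation, i.e. when the new size would land in a smaller size class
// or page count. A shrink that stays within the same size class keeps the
// original block, while any growth beyond old_size always reallocates.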
static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;

static inline void* cpp_alloc(size_t size, bool nothrow) {
  for (;;) {
    void* p = do_malloc(size);
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
      // If no new_handler is established, the allocation failed.
      if (!nh) {
        if (nothrow) return 0;
        throw std::bad_alloc();
      }
      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // if it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
        return p;
      }
    } else {  // allocation success
      return p;
    }
  }
}
void* operator new(size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void* operator new(size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void operator delete(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

void operator delete(void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

void* operator new[](size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void* operator new[](size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void operator delete[](void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

void operator delete[](void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
3512 extern "C" void* memalign(size_t align
, size_t size
) __THROW
{
3513 void* result
= do_memalign(align
, size
);
3514 MallocHook::InvokeNewHook(result
, size
);
3518 extern "C" int posix_memalign(void** result_ptr
, size_t align
, size_t size
)
3520 if (((align
% sizeof(void*)) != 0) ||
3521 ((align
& (align
- 1)) != 0) ||
3526 void* result
= do_memalign(align
, size
);
3527 MallocHook::InvokeNewHook(result
, size
);
3528 if (result
== NULL
) {
3531 *result_ptr
= result
;
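// Usage sketch (illustrative only; these are the standard POSIX semantics
// implemented above): align must be a power of two and a multiple of
// sizeof(void*), otherwise EINVAL is returned.
//
//   void* p = NULL;
//   if (posix_memalign(&p, 64, 1024) == 0) {
//       // p is 64-byte aligned; release it with free(p).
//       free(p);
//   }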
static size_t pagesize = 0;

extern "C" void* valloc(size_t size) __THROW {
  // Allocate page-aligned object of length >= size bytes
  if (pagesize == 0) pagesize = getpagesize();
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" void* pvalloc(size_t size) __THROW {
  // Round up size to a multiple of pagesize
  if (pagesize == 0) pagesize = getpagesize();
  size = (size + pagesize - 1) & ~(pagesize - 1);
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
3555 extern "C" void malloc_stats(void) {
3559 extern "C" int mallopt(int cmd
, int value
) {
3560 return do_mallopt(cmd
, value
);
3563 #ifdef HAVE_STRUCT_MALLINFO
3564 extern "C" struct mallinfo
mallinfo(void) {
3565 return do_mallinfo();
//-------------------------------------------------------------------
// Some library routines on RedHat 9 allocate memory using malloc()
// and free it using __libc_free() (or vice-versa).  Since we provide
// our own implementations of malloc/free, we need to make sure that
// the __libc_XXX variants (defined as part of glibc) also point to
// the same implementations.
//-------------------------------------------------------------------

#if defined(__GLIBC__)

# if defined(__GNUC__) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
// Potentially faster variants that use the gcc alias extension.
// Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
# define ALIAS(x) __attribute__ ((weak, alias (x)))
void* __libc_malloc(size_t size)                ALIAS("malloc");
void  __libc_free(void* ptr)                    ALIAS("free");
void* __libc_realloc(void* ptr, size_t size)    ALIAS("realloc");
void* __libc_calloc(size_t n, size_t size)      ALIAS("calloc");
void  __libc_cfree(void* ptr)                   ALIAS("cfree");
void* __libc_memalign(size_t align, size_t s)   ALIAS("memalign");
void* __libc_valloc(size_t size)                ALIAS("valloc");
void* __libc_pvalloc(size_t size)               ALIAS("pvalloc");
int   __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
# undef ALIAS
# else   /* not __GNUC__ */
// Portable wrappers
void* __libc_malloc(size_t size)                { return malloc(size);       }
void  __libc_free(void* ptr)                    { free(ptr);                 }
void* __libc_realloc(void* ptr, size_t size)    { return realloc(ptr, size); }
void* __libc_calloc(size_t n, size_t size)      { return calloc(n, size);    }
void  __libc_cfree(void* ptr)                   { cfree(ptr);                }
void* __libc_memalign(size_t align, size_t s)   { return memalign(align, s); }
void* __libc_valloc(size_t size)                { return valloc(size);       }
void* __libc_pvalloc(size_t size)               { return pvalloc(size);      }
int   __posix_memalign(void** r, size_t a, size_t s) {
  return posix_memalign(r, a, s);
}
# endif  /* __GNUC__ */

#endif   /* __GLIBC__ */
// Override __libc_memalign in libc on linux boxes specially.
// They have a bug in libc that causes them to (very rarely) allocate
// with __libc_memalign() yet deallocate with free() and the
// definitions above don't catch it.
// This function is an exception to the rule of calling MallocHook method
// from the stack frame of the allocation function;
// heap-checker handles this special case explicitly.
static void *MemalignOverride(size_t align, size_t size, const void *caller)
{
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)

class FreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    HashSet<void*> m_freeObjects;

public:
    FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }

    void visit(void* ptr) { m_freeObjects.add(ptr); }
    bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
    size_t freeObjectCount() const { return m_freeObjects.size(); }

    void findFreeObjects(TCMalloc_ThreadCache* threadCache)
    {
        for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
            threadCache->enumerateFreeObjects(*this, m_reader);
    }

    void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
    {
        for (unsigned i = 0; i < numSizes; i++)
            centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
    }
};
class PageMapFreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    FreeObjectFinder& m_freeObjectFinder;

public:
    PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
        : m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    int visit(void* ptr) const
    {
        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (span->free) {
            void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
            m_freeObjectFinder.visit(ptr);
        } else if (span->sizeclass) {
            // Walk the free list of the small-object span, keeping track of each object seen
            for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject)))
                m_freeObjectFinder.visit(nextObject);
        }
        return span->length;
    }
};
class PageMapMemoryUsageRecorder {
    task_t m_task;
    void* m_context;
    unsigned m_typeMask;
    vm_range_recorder_t* m_recorder;
    const RemoteMemoryReader& m_reader;
    const FreeObjectFinder& m_freeObjectFinder;
    mutable HashSet<void*> m_seenPointers;

public:
    PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
        : m_task(task)
        , m_context(context)
        , m_typeMask(typeMask)
        , m_recorder(recorder)
        , m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    int visit(void* ptr) const
    {
        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (m_seenPointers.contains(ptr))
            return span->length;
        m_seenPointers.add(ptr);

        // Mark the memory used for the Span itself as an administrative region
        vm_range_t ptrRange = { reinterpret_cast<vm_address_t>(ptr), sizeof(Span) };
        if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
            (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, &ptrRange, 1);

        ptrRange.address = span->start << kPageShift;
        ptrRange.size = span->length * kPageSize;

        // Mark the memory region the span represents as candidates for containing pointers
        if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
            (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);

        if (!span->free && (m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
            // If it's an allocated large object span, mark it as in use
            if (span->sizeclass == 0 && !m_freeObjectFinder.isFreeObject(reinterpret_cast<void*>(ptrRange.address)))
                (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &ptrRange, 1);
            else if (span->sizeclass) {
                const size_t byteSize = ByteSizeForClass(span->sizeclass);
                unsigned totalObjects = (span->length << kPageShift) / byteSize;
                ASSERT(span->refcount <= totalObjects);
                char* ptr = reinterpret_cast<char*>(span->start << kPageShift);

                // Mark each allocated small object within the span as in use
                for (unsigned i = 0; i < totalObjects; i++) {
                    char* thisObject = ptr + (i * byteSize);
                    if (m_freeObjectFinder.isFreeObject(thisObject))
                        continue;

                    vm_range_t objectRange = { reinterpret_cast<vm_address_t>(thisObject), byteSize };
                    (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &objectRange, 1);
                }
            }
        }

        return span->length;
    }
};
kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
{
    RemoteMemoryReader memoryReader(task, reader);

    FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
    TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
    TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
    TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);

    TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);

    FreeObjectFinder finder(memoryReader);
    finder.findFreeObjects(threadHeaps);
    finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);

    TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
    PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
    pageMap->visit(pageMapFinder, memoryReader);

    PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
    pageMap->visit(usageRecorder, memoryReader);

    return 0;
}
size_t FastMallocZone::size(malloc_zone_t*, const void*)
{
    return 0;
}

void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
{
    return 0;
}

void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
{
    return 0;
}

void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
{
    // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
    // is not in this zone.  When this happens, the pointer being freed was not allocated by any
    // zone so we need to print a useful error for the application developer.
    malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
}

void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
{
    return 0;
}
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
    &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics };
FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches)
    : m_pageHeap(pageHeap)
    , m_threadHeaps(threadHeaps)
    , m_centralCaches(centralCaches)
{
    memset(&m_zone, 0, sizeof(m_zone));
    m_zone.zone_name = "JavaScriptCore FastMalloc";
    m_zone.size = &FastMallocZone::size;
    m_zone.malloc = &FastMallocZone::zoneMalloc;
    m_zone.calloc = &FastMallocZone::zoneCalloc;
    m_zone.realloc = &FastMallocZone::zoneRealloc;
    m_zone.free = &FastMallocZone::zoneFree;
    m_zone.valloc = &FastMallocZone::zoneValloc;
    m_zone.destroy = &FastMallocZone::zoneDestroy;
    m_zone.introspect = &jscore_fastmalloc_introspection;
    malloc_zone_register(&m_zone);
}
void FastMallocZone::init()
{
    static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache));
}
void releaseFastMallocFreeMemory()
{
    // Flush free pages in the current thread cache back to the page heap.
    // Low watermark mechanism in Scavenge() prevents full return on the first pass.
    // The second pass flushes everything.
    if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
        threadCache->Scavenge();
        threadCache->Scavenge();
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
}
FastMallocStatistics fastMallocStatistics()
{
    FastMallocStatistics statistics;
    {
        SpinLockHolder lockHolder(&pageheap_lock);
        statistics.heapSize = static_cast<size_t>(pageheap->SystemBytes());
        statistics.freeSizeInHeap = static_cast<size_t>(pageheap->FreeBytes());
        statistics.returnedSize = pageheap->ReturnedBytes();
        statistics.freeSizeInCaches = 0;
        for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache; threadCache = threadCache->next_)
            statistics.freeSizeInCaches += threadCache->Size();
    }
    for (unsigned cl = 0; cl < kNumClasses; ++cl) {
        const int length = central_cache[cl].length();
        const int tc_length = central_cache[cl].tc_length();
        statistics.freeSizeInCaches += ByteSizeForClass(cl) * (length + tc_length);
    }
    return statistics;
}

#endif // FORCE_SYSTEM_MALLOC