// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
// Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// A malloc that uses a per-thread cache to satisfy small malloc requests.
// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
//
// See doc/tcmalloc.html for a high-level
// description of how this malloc works.
//
// SYNCHRONIZATION
//  1. The thread-specific lists are accessed without acquiring any locks.
//     This is safe because each such list is only accessed by one thread.
//  2. We have a lock per central free-list, and hold it while manipulating
//     the central free list for a particular size.
//  3. The central page allocator is protected by "pageheap_lock".
//  4. The pagemap (which maps from page-number to descriptor),
//     can be read without holding any locks, and written while holding
//     the "pageheap_lock".
//  5. To improve performance, a subset of the information one can get
//     from the pagemap is cached in a data structure, pagemap_cache_,
//     that atomically reads and writes its entries.  This cache can be
//     read and written without locking.
//
//     This multi-threaded access to the pagemap is safe for fairly
//     subtle reasons.  We basically assume that when an object X is
//     allocated by thread A and deallocated by thread B, there must
//     have been appropriate synchronization in the handoff of object
//     X from thread A to thread B.  The same logic applies to pagemap_cache_.
//
// THE PAGEID-TO-SIZECLASS CACHE
// Hot PageID-to-sizeclass mappings are held by pagemap_cache_.  If this cache
// returns 0 for a particular PageID then that means "no information," not that
// the sizeclass is 0.  The cache may have stale information for pages that do
// not hold the beginning of any free()'able object.  Staleness is eliminated
// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
// do_memalign() for all other relevant pages.
//
// TODO: Bias reclamation to larger addresses
// TODO: implement mallinfo/mallopt
// TODO: Better testing
//
// 9/28/2003 (new page-level allocator replaces ptmalloc2):
// * malloc/free of small objects goes from ~300 ns to ~50 ns.
// * allocation of a reasonably complicated struct
//   goes from about 1100 ns to about 300 ns.
#include "config.h"
#include "FastMalloc.h"

#include "Assertions.h"
#if USE(MULTIPLE_THREADS)
#include <pthread.h>
#endif

#ifndef NO_TCMALLOC_SAMPLES
#ifdef WTF_CHANGES
#define NO_TCMALLOC_SAMPLES
#endif
#endif

#if !defined(USE_SYSTEM_MALLOC) && defined(NDEBUG)
#define FORCE_SYSTEM_MALLOC 0
#else
#define FORCE_SYSTEM_MALLOC 1
#endif
#ifndef NDEBUG
namespace WTF {

#if USE(MULTIPLE_THREADS)
static pthread_key_t isForbiddenKey;
static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
static void initializeIsForbiddenKey()
{
    pthread_key_create(&isForbiddenKey, 0);
}

static bool isForbidden()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    return !!pthread_getspecific(isForbiddenKey);
}

void fastMallocForbid()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    pthread_setspecific(isForbiddenKey, &isForbiddenKey);
}

void fastMallocAllow()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    pthread_setspecific(isForbiddenKey, 0);
}

#else

static bool staticIsForbidden;
static bool isForbidden()
{
    return staticIsForbidden;
}

void fastMallocForbid()
{
    staticIsForbidden = true;
}

void fastMallocAllow()
{
    staticIsForbidden = false;
}
#endif // USE(MULTIPLE_THREADS)

} // namespace WTF
#endif // NDEBUG
namespace WTF {

void *fastZeroedMalloc(size_t n)
{
    void *result = fastMalloc(n);
    if (!result)
        return 0;
    memset(result, 0, n);
#ifndef WTF_CHANGES
    MallocHook::InvokeNewHook(result, n);
#endif
    return result;
}

} // namespace WTF
#if FORCE_SYSTEM_MALLOC

#include <stdlib.h>
#if !PLATFORM(WIN_OS)
#include <pthread.h>
#endif

namespace WTF {

void *fastMalloc(size_t n)
{
    ASSERT(!isForbidden());
    return malloc(n);
}

void *fastCalloc(size_t n_elements, size_t element_size)
{
    ASSERT(!isForbidden());
    return calloc(n_elements, element_size);
}

void fastFree(void* p)
{
    ASSERT(!isForbidden());
    free(p);
}

void *fastRealloc(void* p, size_t n)
{
    ASSERT(!isForbidden());
    return realloc(p, n);
}

void releaseFastMallocFreeMemory() { }

} // namespace WTF

#if PLATFORM(DARWIN)
// This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
// It will never be used in this case, so its type and value are less interesting than its presence.
extern "C" const int jscore_fastmalloc_introspection = 0;
#endif

#else // FORCE_SYSTEM_MALLOC
#if HAVE(STDINT_H)
#include <stdint.h>
#elif HAVE(INTTYPES_H)
#include <inttypes.h>
#else
#include <sys/types.h>
#endif

#include "AlwaysInline.h"
#include "Assertions.h"
#include "TCPackedCache.h"
#include "TCPageMap.h"
#include "TCSpinLock.h"
#include "TCSystemAlloc.h"
#include <algorithm>
#include <errno.h>
#include <new>
#include <pthread.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#if COMPILER(MSVC)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#endif

#if PLATFORM(DARWIN)
#include "MallocZoneSupport.h"
#include <wtf/HashSet.h>
#endif

#ifndef PRIuS
#define PRIuS "zu"
#endif

// Calling pthread_getspecific through a global function pointer is faster than a normal
// call to the function on Mac OS X, and it's used in performance-critical code. So we
// use a function pointer. But that's not necessarily faster on other platforms, and we had
// problems with this technique on Windows, so we'll do this only on Mac OS X.
#if PLATFORM(DARWIN)
static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
#define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
#endif
#define DEFINE_VARIABLE(type, name, value, meaning) \
  namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead {  \
  type FLAGS_##name(value);                                                     \
  char FLAGS_no##name;                                                          \
  }                                                                             \
  using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name

#define DEFINE_int64(name, value, meaning) \
  DEFINE_VARIABLE(int64_t, name, value, meaning)

#define DEFINE_double(name, value, meaning) \
  DEFINE_VARIABLE(double, name, value, meaning)

namespace WTF {

#define malloc fastMalloc
#define calloc fastCalloc
#define free fastFree
#define realloc fastRealloc

#define MESSAGE LOG_ERROR
#define CHECK_CONDITION ASSERT
class TCMalloc_PageHeap;
class TCMalloc_ThreadCache;
class TCMalloc_Central_FreeListPadded;

#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
class FastMallocZone {
public:
    static void init();

    static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
    static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
    static boolean_t check(malloc_zone_t*) { return true; }
    static void print(malloc_zone_t*, boolean_t) { }
    static void log(malloc_zone_t*, void*) { }
    static void forceLock(malloc_zone_t*) { }
    static void forceUnlock(malloc_zone_t*) { }
    static void statistics(malloc_zone_t*, malloc_statistics_t*) { }

private:
    FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*);

    static size_t size(malloc_zone_t*, const void*);
    static void* zoneMalloc(malloc_zone_t*, size_t);
    static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
    static void zoneFree(malloc_zone_t*, void*);
    static void* zoneRealloc(malloc_zone_t*, void*, size_t);
    static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
    static void zoneDestroy(malloc_zone_t*) { }

    malloc_zone_t m_zone;
    TCMalloc_PageHeap* m_pageHeap;
    TCMalloc_ThreadCache** m_threadHeaps;
    TCMalloc_Central_FreeListPadded* m_centralCaches;
};
#endif
// This #ifdef should almost never be set.  Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip)  (0)
#else
# include <google/stacktrace.h>
#endif

// Even if we have support for thread-local storage in the compiler
// and linker, the OS may not support it.  We need to check that at
// runtime.  Right now, we have to keep a manual set of "bad" OSes.
#if defined(HAVE_TLS)
  static bool kernel_supports_tls = false;      // be conservative
  static inline bool KernelSupportsTLS() {
    return kernel_supports_tls;
  }
# if !HAVE_DECL_UNAME    // if too old for uname, probably too old for TLS
    static void CheckIfKernelSupportsTLS() {
      kernel_supports_tls = false;
    }
# else
#   include <sys/utsname.h>    // DECL_UNAME checked for <sys/utsname.h> too
    static void CheckIfKernelSupportsTLS() {
      struct utsname buf;
      if (uname(&buf) != 0) {   // should be impossible
        MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
        kernel_supports_tls = false;
      } else if (strcasecmp(buf.sysname, "linux") == 0) {
        // The linux case: the first kernel to support TLS was 2.6.0
        if (buf.release[0] < '2' && buf.release[1] == '.')    // 0.x or 1.x
          kernel_supports_tls = false;
        else if (buf.release[0] == '2' && buf.release[1] == '.' &&
                 buf.release[2] >= '0' && buf.release[2] < '6' &&
                 buf.release[3] == '.')                       // 2.0 - 2.5
          kernel_supports_tls = false;
        else
          kernel_supports_tls = true;
      } else {        // some other kernel, we'll be optimistic
        kernel_supports_tls = true;
      }
      // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
    }
#  endif  // HAVE_DECL_UNAME
#endif    // HAVE_TLS

// __THROW is defined in glibc systems.  It means, counter-intuitively,
// "This function will never throw an exception."  It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.
#ifndef __THROW    // I guess we're not on a glibc system
# define __THROW   // __THROW is just an optimization, so ok to make it ""
#endif
//-------------------------------------------------------------------
// Configuration
//-------------------------------------------------------------------

// Not all possible combinations of the following parameters make
// sense.  In particular, if kMaxSize increases, you may have to
// increase kNumClasses as well.
static const size_t kPageShift  = 12;
static const size_t kPageSize   = 1 << kPageShift;
static const size_t kMaxSize    = 8u * kPageSize;
static const size_t kAlignShift = 3;
static const size_t kAlignment  = 1 << kAlignShift;
static const size_t kNumClasses = 68;

// Allocates a big block of memory for the pagemap once we reach more than
// 128MB
static const size_t kPageMapBigAllocationThreshold = 128 << 20;

// Minimum number of pages to fetch from system at a time.  Must be
// significantly bigger than kBlockSize to amortize system-call
// overhead, and also to reduce external fragmentation.  Also, we
// should keep this value big because various incarnations of Linux
// have small limits on the number of mmap() regions per
// address-space.
static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);

// Number of objects to move between a per-thread list and a central
// list in one shot.  We want this to be not too small so we can
// amortize the lock overhead for accessing the central list.  Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
static int num_objects_to_move[kNumClasses];

// Maximum length we allow a per-thread free-list to have before we
// move objects from it into the corresponding central free-list.  We
// want this big to avoid locking the central free-list too often.  It
// should not hurt to make this list somewhat big because the
// scavenging code will shrink it down when its contents are not in use.
static const int kMaxFreeListLength = 256;

// Lower and upper bounds on the per-thread cache sizes
static const size_t kMinThreadCacheSize = kMaxSize * 2;
static const size_t kMaxThreadCacheSize = 2 << 20;

// Default bound on the total amount of thread caches
static const size_t kDefaultOverallThreadCacheSize = 16 << 20;

// For all span-lengths < kMaxPages we keep an exact-size list.
// REQUIRED: kMaxPages >= kMinSystemAlloc;
static const size_t kMaxPages = kMinSystemAlloc;
/* The smallest prime > 2^n */
static int primes_list[] = {
    // Small values might cause high rates of sampling
    // and hence commented out.
    // 2, 5, 11, 17, 37, 67, 131, 257,
    // 521, 1031, 2053, 4099, 8209, 16411,
    32771, 65537, 131101, 262147, 524309, 1048583,
    2097169, 4194319, 8388617, 16777259, 33554467 };

// Twice the approximate gap between sampling actions.
// I.e., we take one sample approximately once every
//      tcmalloc_sample_parameter/2
// bytes of allocation, i.e., ~ once every 128KB.
// Must be a prime number.
#ifdef NO_TCMALLOC_SAMPLES
DEFINE_int64(tcmalloc_sample_parameter, 0,
             "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
static size_t sample_period = 0;
#else
DEFINE_int64(tcmalloc_sample_parameter, 262147,
             "Twice the approximate gap between sampling actions."
             " Must be a prime number. Otherwise will be rounded up to a "
             " larger prime number");
static size_t sample_period = 262147;
#endif

// Protects sample_period above
static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
// Parameters for controlling how fast memory is returned to the OS.

DEFINE_double(tcmalloc_release_rate, 1,
              "Rate at which we release unused memory to the system.  "
              "Zero means we never release memory back to the system.  "
              "Increase this flag to return memory faster; decrease it "
              "to return memory slower.  Reasonable rates are in the "
              "range [0,10]");
//-------------------------------------------------------------------
// Mapping from size to size_class and vice versa
//-------------------------------------------------------------------

// Sizes <= 1024 have an alignment >= 8.  So for such sizes we have an
// array indexed by ceil(size/8).  Sizes > 1024 have an alignment >= 128.
// So for these larger sizes we have an array indexed by ceil(size/128).
//
// We flatten both logical arrays into one physical array and use
// arithmetic to compute an appropriate index.  The constants used by
// ClassIndex() were selected to make the flattening work.
//
// Examples:
//   Size       Expression                      Index
//   -------------------------------------------------------
//   0          (0 + 7) / 8                     0
//   1          (1 + 7) / 8                     1
//   ...
//   1024       (1024 + 7) / 8                  128
//   1025       (1025 + 127 + (120<<7)) / 128   129
//   ...
//   32768      (32768 + 127 + (120<<7)) / 128  376
static const size_t kMaxSmallSize = 1024;
static const int shift_amount[2] = { 3, 7 };  // For divides by 8 or 128
static const int add_amount[2] = { 7, 127 + (120 << 7) };
static unsigned char class_array[377];

// Compute index of the class_array[] entry for a given size
static inline int ClassIndex(size_t s) {
  const int i = (s > kMaxSmallSize);
  return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
}
// Mapping from size class to max size storable in that class
static size_t class_to_size[kNumClasses];

// Mapping from size class to number of pages to allocate at a time
static size_t class_to_pages[kNumClasses];

// TransferCache is used to cache transfers of num_objects_to_move[size_class]
// back and forth between thread caches and the central cache for a given size
// class.
struct TCEntry {
  void *head;  // Head of chain of objects.
  void *tail;  // Tail of chain of objects.
};

// A central cache freelist can have anywhere from 0 to kNumTransferEntries
// slots to put linked list chains into.  To keep memory usage bounded the
// total number of TCEntries across size classes is fixed.  Currently each
// size class is initially given one TCEntry which also means that the
// maximum any one class can have is kNumClasses.
static const int kNumTransferEntries = kNumClasses;
// Note: the following only works for "n"s that fit in 32-bits, but
// that is fine since we only use it for small sizes.
static inline int LgFloor(size_t n) {
  int log = 0;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    size_t x = n >> shift;
    if (x != 0) {
      n = x;
      log += shift;
    }
  }
  ASSERT(n == 1);
  return log;
}
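// Example: LgFloor(35) tries shift = 16 and 8 (no change), then shift = 4
// (n becomes 35 >> 4 = 2, log = 4), shift = 2 (no change), and shift = 1
// (n becomes 1, log = 5).  It returns 5, and floor(log2(35)) is indeed 5.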
// Some very basic linked list functions for dealing with using void * as
// storage.

static inline void *SLL_Next(void *t) {
  return *(reinterpret_cast<void**>(t));
}

static inline void SLL_SetNext(void *t, void *n) {
  *(reinterpret_cast<void**>(t)) = n;
}

static inline void SLL_Push(void **list, void *element) {
  SLL_SetNext(element, *list);
  *list = element;
}

static inline void *SLL_Pop(void **list) {
  void *result = *list;
  *list = SLL_Next(*list);
  return result;
}

// Remove N elements from a linked list to which head points.  head will be
// modified to point to the new head.  start and end will point to the first
// and last nodes of the range.  Note that end will point to NULL after this
// function is called.
static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
  if (N == 0) {
    *start = NULL;
    *end = NULL;
    return;
  }

  void *tmp = *head;
  for (int i = 1; i < N; ++i) {
    tmp = SLL_Next(tmp);
  }

  *start = *head;
  *end = tmp;
  *head = SLL_Next(tmp);
  // Unlink range from list.
  SLL_SetNext(tmp, NULL);
}

static inline void SLL_PushRange(void **head, void *start, void *end) {
  if (!start) return;
  SLL_SetNext(end, *head);
  *head = start;
}

static inline size_t SLL_Size(void *head) {
  int count = 0;
  while (head) {
    count++;
    head = SLL_Next(head);
  }
  return count;
}
// Setup helper functions.

static ALWAYS_INLINE size_t SizeClass(size_t size) {
  return class_array[ClassIndex(size)];
}

// Get the byte-size for a specified class
static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
  return class_to_size[cl];
}

static int NumMoveSize(size_t size) {
  if (size == 0) return 0;
  // Use approx 64k transfers between thread and central caches.
  int num = static_cast<int>(64.0 * 1024.0 / size);
  if (num < 2) num = 2;
  // Clamp well below kMaxFreeListLength to avoid ping pong between central
  // and thread caches.
  if (num > static_cast<int>(0.8 * kMaxFreeListLength))
    num = static_cast<int>(0.8 * kMaxFreeListLength);

  // Also, avoid bringing in too many objects into small object free
  // lists.  There are lots of such lists, and if we allow each one to
  // fetch too many at a time, we end up having to scavenge too often
  // (especially when there are lots of threads and each thread gets a
  // small allowance for its thread cache).
  //
  // TODO: Make thread cache free list sizes dynamic so that we do not
  // have to equally divide a fixed resource amongst lots of threads.
  if (num > 32) num = 32;

  return num;
}
// Initialize the mapping arrays
static void InitSizeClasses() {
  // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
  if (ClassIndex(0) < 0) {
    MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
    abort();
  }
  if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
    MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
    abort();
  }

  // Compute the size classes we want to use
  size_t sc = 1;   // Next size class to assign
  unsigned char alignshift = kAlignShift;
  int last_lg = -1;
  for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
    int lg = LgFloor(size);
    if (lg > last_lg) {
      // Increase alignment every so often.
      //
      // Since we double the alignment every time size doubles and
      // size >= 128, this means that space wasted due to alignment is
      // at most 16/128 i.e., 12.5%.  Plus we cap the alignment at 256
      // bytes, so the space wasted as a percentage starts falling for
      // sizes > 2K.
      if ((lg >= 7) && (alignshift < 8)) {
        alignshift++;
      }
      last_lg = lg;
    }

    // Allocate enough pages so leftover is less than 1/8 of total.
    // This bounds wasted space to at most 12.5%.
    size_t psize = kPageSize;
    while ((psize % size) > (psize >> 3)) {
      psize += kPageSize;
    }
    const size_t my_pages = psize >> kPageShift;

    if (sc > 1 && my_pages == class_to_pages[sc-1]) {
      // See if we can merge this into the previous class without
      // increasing the fragmentation of the previous class.
      const size_t my_objects = (my_pages << kPageShift) / size;
      const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
                                  / class_to_size[sc-1];
      if (my_objects == prev_objects) {
        // Adjust last class to include this size
        class_to_size[sc-1] = size;
        continue;
      }
    }

    // Add new class
    class_to_pages[sc] = my_pages;
    class_to_size[sc] = size;
    sc++;
  }
  if (sc != kNumClasses) {
    MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
            sc, int(kNumClasses));
    abort();
  }

  // Initialize the mapping arrays
  int next_size = 0;
  for (unsigned char c = 1; c < kNumClasses; c++) {
    const size_t max_size_in_class = class_to_size[c];
    for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
      class_array[ClassIndex(s)] = c;
    }
    next_size = static_cast<int>(max_size_in_class + kAlignment);
  }

  // Double-check sizes just to be safe
  for (size_t size = 0; size <= kMaxSize; size++) {
    const size_t sc = SizeClass(size);
    if (sc == 0) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      abort();
    }
    if (sc > 1 && size <= class_to_size[sc-1]) {
      MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
              "\n", sc, size);
      abort();
    }
    if (sc >= kNumClasses) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      abort();
    }
    const size_t s = class_to_size[sc];
    if (size > s) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      abort();
    }
    if (s == 0) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      abort();
    }
  }

  // Initialize the num_objects_to_move array.
  for (size_t cl = 1; cl < kNumClasses; ++cl) {
    num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
  }

#ifndef WTF_CHANGES
  if (false) {
    // Dump class sizes and maximum external wastage per size class
    for (size_t cl = 1; cl < kNumClasses; ++cl) {
      const int alloc_size = class_to_pages[cl] << kPageShift;
      const int alloc_objs = alloc_size / class_to_size[cl];
      const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
      const int max_waste = alloc_size - min_used;
      MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
              int(cl),
              int(class_to_size[cl-1] + 1),
              int(class_to_size[cl]),
              int(class_to_pages[cl] << kPageShift),
              max_waste * 100.0 / alloc_size
              );
    }
  }
#endif
}
// -------------------------------------------------------------------------
// Simple allocator for objects of a specified type.  External locking
// is required before accessing one of these objects.
// -------------------------------------------------------------------------

// Metadata allocator -- keeps stats about how many bytes allocated
static uint64_t metadata_system_bytes = 0;
static void* MetaDataAlloc(size_t bytes) {
  void* result = TCMalloc_SystemAlloc(bytes, 0);
  if (result != NULL) {
    metadata_system_bytes += bytes;
  }
  return result;
}

template <class T>
class PageHeapAllocator {
 private:
  // How much to allocate from system at a time
  static const size_t kAllocIncrement = 32 << 10;

  // Aligned size of T
  static const size_t kAlignedSize
    = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);

  // Free area from which to carve new objects
  char* free_area_;
  size_t free_avail_;

  // Free list of already carved objects
  void* free_list_;

  // Number of allocated but unfreed objects
  int inuse_;

 public:
  void Init() {
    ASSERT(kAlignedSize <= kAllocIncrement);
    inuse_ = 0;
    free_area_ = NULL;
    free_avail_ = 0;
    free_list_ = NULL;
  }

  T* New() {
    // Consult free list
    void* result;
    if (free_list_ != NULL) {
      result = free_list_;
      free_list_ = *(reinterpret_cast<void**>(result));
    } else {
      if (free_avail_ < kAlignedSize) {
        // Need more room
        free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
        if (free_area_ == NULL) abort();
        free_avail_ = kAllocIncrement;
      }
      result = free_area_;
      free_area_ += kAlignedSize;
      free_avail_ -= kAlignedSize;
    }
    inuse_++;
    return reinterpret_cast<T*>(result);
  }

  void Delete(T* p) {
    *(reinterpret_cast<void**>(p)) = free_list_;
    free_list_ = p;
    inuse_--;
  }

  int inuse() const { return inuse_; }
};
// -------------------------------------------------------------------------
// Span - a contiguous run of pages
// -------------------------------------------------------------------------

// Type that can hold a page number
typedef uintptr_t PageID;

// Type that can hold the length of a run of pages
typedef uintptr_t Length;

static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;

// Convert byte size into pages.  This won't overflow, but may return
// an unreasonably large value if bytes is huge enough.
static inline Length pages(size_t bytes) {
  return (bytes >> kPageShift) +
         ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}

// Convert a user size into the number of bytes that will actually be
// allocated
static size_t AllocationSize(size_t bytes) {
  if (bytes > kMaxSize) {
    // Large object: we allocate an integral number of pages
    ASSERT(bytes <= (kMaxValidPages << kPageShift));
    return pages(bytes) << kPageShift;
  } else {
    // Small object: find the size class to which it belongs
    return ByteSizeForClass(SizeClass(bytes));
  }
}
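// With kPageShift = 12 (4K pages), pages(10000) = (10000 >> 12) + 1 = 3, so a
// large request like AllocationSize(50000) rounds up to 13 pages (53248
// bytes), while a small request such as 100 bytes returns the byte size of
// whatever size class 100 maps to.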
// Information kept for a span (a contiguous run of pages).
struct Span {
  PageID        start;          // Starting page number
  Length        length;         // Number of pages in span
  Span*         next;           // Used when in linked list
  Span*         prev;           // Used when in linked list
  void*         objects;        // Linked list of free objects
  unsigned int  free : 1;       // Is the span free
  unsigned int  sample : 1;     // Sampled object?
  unsigned int  sizeclass : 8;  // Size-class for small objects (or 0)
  unsigned int  refcount : 11;  // Number of non-free objects

#undef SPAN_HISTORY
#ifdef SPAN_HISTORY
  // For debugging, we can keep a log of events per span
  int nexthistory;
  char history[64];
  int value[64];
#endif
};

#ifdef SPAN_HISTORY
void Event(Span* span, char op, int v = 0) {
  span->history[span->nexthistory] = op;
  span->value[span->nexthistory] = v;
  span->nexthistory++;
  if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
}
#else
#define Event(s,o,v) ((void) 0)
#endif
// Allocator/deallocator for spans
static PageHeapAllocator<Span> span_allocator;
static Span* NewSpan(PageID p, Length len) {
  Span* result = span_allocator.New();
  memset(result, 0, sizeof(*result));
  result->start = p;
  result->length = len;
#ifdef SPAN_HISTORY
  result->nexthistory = 0;
#endif
  return result;
}

static inline void DeleteSpan(Span* span) {
#ifndef NDEBUG
  // In debug mode, trash the contents of deleted Spans
  memset(span, 0x3f, sizeof(*span));
#endif
  span_allocator.Delete(span);
}
// -------------------------------------------------------------------------
// Doubly linked list of spans.
// -------------------------------------------------------------------------

static inline void DLL_Init(Span* list) {
  list->next = list;
  list->prev = list;
}

static inline void DLL_Remove(Span* span) {
  span->prev->next = span->next;
  span->next->prev = span->prev;
  span->prev = NULL;
  span->next = NULL;
}

static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
  return list->next == list;
}

static int DLL_Length(const Span* list) {
  int result = 0;
  for (Span* s = list->next; s != list; s = s->next) {
    result++;
  }
  return result;
}

#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
static void DLL_Print(const char* label, const Span* list) {
  MESSAGE("%-10s %p:", label, list);
  for (const Span* s = list->next; s != list; s = s->next) {
    MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
  }
  MESSAGE("\n");
}
#endif

static inline void DLL_Prepend(Span* list, Span* span) {
  ASSERT(span->next == NULL);
  ASSERT(span->prev == NULL);
  span->next = list->next;
  span->prev = list;
  list->next->prev = span;
  list->next = span;
}
// -------------------------------------------------------------------------
// Stack traces kept for sampled allocations
//   The following state is protected by pageheap_lock_.
// -------------------------------------------------------------------------

// size/depth are made the same size as a pointer so that some generic
// code below can conveniently cast them back and forth to void*.
static const int kMaxStackDepth = 31;
struct StackTrace {
  uintptr_t size;          // Size of object
  uintptr_t depth;         // Number of PC values stored in array below
  void* stack[kMaxStackDepth];
};
static PageHeapAllocator<StackTrace> stacktrace_allocator;
static Span sampled_objects;
// -------------------------------------------------------------------------
// Map from page-id to per-page data
// -------------------------------------------------------------------------

// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
  typedef PackedCache<BITS, uint64_t> CacheType;
};

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32-kPageShift> Type;
  typedef PackedCache<32-kPageShift, uint16_t> CacheType;
};
// -------------------------------------------------------------------------
// Page-level allocator
//  * Eager coalescing
//
// Heap for page-level allocation.  We allow allocating and freeing
// contiguous runs of pages (called a "span").
// -------------------------------------------------------------------------

class TCMalloc_PageHeap {
 public:
  void init();

  // Allocate a run of "n" pages.  Returns zero if out of memory.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]".
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: !span->free
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);

  // Return the descriptor for the specified page.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

#ifdef WTF_CHANGES
  inline Span* GetDescriptorEnsureSafe(PageID p)
  {
      pagemap_.Ensure(p, 1);
      return GetDescriptor(p);
  }
#endif

  // Dump state to stderr
#ifndef WTF_CHANGES
  void Dump(TCMalloc_Printer* out);
#endif

  // Return number of bytes allocated from system
  inline uint64_t SystemBytes() const { return system_bytes_; }

  // Return number of free bytes in heap
  uint64_t FreeBytes() const {
    return (static_cast<uint64_t>(free_pages_) << kPageShift);
  }

  bool Check();
  bool CheckList(Span* list, Length min_pages, Length max_pages);

  // Release all pages on the free list for reuse by the OS:
  void ReleaseFreePages();

  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }

 private:
  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
  typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
  PageMap pagemap_;
  mutable PageMapCache pagemap_cache_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span        normal;
    Span        returned;
  };

  // List of free spans of length >= kMaxPages
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans
  SpanList free_[kMaxPages];

  // Number of pages kept in free lists
  uintptr_t free_pages_;

  // Bytes allocated from system
  uint64_t system_bytes_;

  bool GrowHeap(Length n);

  // REQUIRES   span->length >= n
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists.  Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.
  //
  // "released" is true iff "span" was found on a "returned" list.
  void Carve(Span* span, Length n, bool released);

  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }

  // Allocate a large span of length == n.  If successful, returns a
  // span of exactly the specified length.  Else, returns NULL.
  Span* AllocLarge(Length n);

  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);

  // Number of pages to deallocate before doing more scavenging
  int64_t scavenge_counter_;

  // Index of last free list we scavenged
  size_t scavenge_index_;

#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
  friend class FastMallocZone;
#endif
};
void TCMalloc_PageHeap::init()
{
  pagemap_.init(MetaDataAlloc);
  pagemap_cache_ = PageMapCache(0);
  free_pages_ = 0;
  system_bytes_ = 0;
  scavenge_counter_ = 0;
  // Start scavenging at kMaxPages list
  scavenge_index_ = kMaxPages-1;
  COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
  DLL_Init(&large_.normal);
  DLL_Init(&large_.returned);
  for (size_t i = 0; i < kMaxPages; i++) {
    DLL_Init(&free_[i].normal);
    DLL_Init(&free_[i].returned);
  }
}
inline Span* TCMalloc_PageHeap::New(Length n) {
  ASSERT(Check());
  ASSERT(n > 0);

  // Find first size >= n that has a non-empty list
  for (Length s = n; s < kMaxPages; s++) {
    Span* ll = NULL;
    bool released = false;
    if (!DLL_IsEmpty(&free_[s].normal)) {
      // Found normal span
      ll = &free_[s].normal;
    } else if (!DLL_IsEmpty(&free_[s].returned)) {
      // Found returned span; reallocate it
      ll = &free_[s].returned;
      released = true;
    } else {
      // Keep looking in larger classes
      continue;
    }

    Span* result = ll->next;
    Carve(result, n, released);
    ASSERT(Check());
    free_pages_ -= n;
    return result;
  }

  Span* result = AllocLarge(n);
  if (result != NULL) return result;

  // Grow the heap and try again
  if (!GrowHeap(n)) {
    ASSERT(Check());
    return NULL;
  }

  return AllocLarge(n);
}
Span* TCMalloc_PageHeap::AllocLarge(Length n) {
  // find the best span (closest to n in size).
  // The following loop implements address-ordered best-fit.
  bool from_released = false;
  Span *best = NULL;

  // Search through normal list
  for (Span* span = large_.normal.next;
       span != &large_.normal;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        from_released = false;
      }
    }
  }

  // Search through released list in case it has a better fit
  for (Span* span = large_.returned.next;
       span != &large_.returned;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        from_released = true;
      }
    }
  }

  if (best != NULL) {
    Carve(best, n, from_released);
    ASSERT(Check());
    free_pages_ -= n;
    return best;
  }
  return NULL;
}
Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
  ASSERT(0 < n);
  ASSERT(n < span->length);
  ASSERT(!span->free);
  ASSERT(span->sizeclass == 0);
  Event(span, 'T', n);

  const Length extra = span->length - n;
  Span* leftover = NewSpan(span->start + n, extra);
  Event(leftover, 'U', extra);
  RecordSpan(leftover);
  pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
  span->length = n;

  return leftover;
}
inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
  ASSERT(n > 0);
  DLL_Remove(span);
  span->free = 0;
  Event(span, 'A', n);

  const int extra = static_cast<int>(span->length - n);
  ASSERT(extra >= 0);
  if (extra > 0) {
    Span* leftover = NewSpan(span->start + n, extra);
    leftover->free = 1;
    Event(leftover, 'S', extra);
    RecordSpan(leftover);

    // Place leftover span on appropriate free list
    SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
    Span* dst = released ? &listpair->returned : &listpair->normal;
    DLL_Prepend(dst, leftover);

    span->length = n;
    pagemap_.set(span->start + n - 1, span);
  }
}
inline void TCMalloc_PageHeap::Delete(Span* span) {
  ASSERT(Check());
  ASSERT(!span->free);
  ASSERT(span->length > 0);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start + span->length - 1) == span);
  span->sizeclass = 0;
  span->sample = 0;

  // Coalesce -- we guarantee that "p" != 0, so no bounds checking
  // necessary.  We do not bother resetting the stale pagemap
  // entries for the pieces we are merging together because we only
  // care about the pagemap entries for the boundaries.
  //
  // Note that the spans we merge into "span" may come out of
  // a "returned" list.  For simplicity, we move these into the
  // "normal" list of the appropriate size class.
  const PageID p = span->start;
  const Length n = span->length;
  Span* prev = GetDescriptor(p-1);
  if (prev != NULL && prev->free) {
    // Merge preceding span into this span
    ASSERT(prev->start + prev->length == p);
    const Length len = prev->length;
    DLL_Remove(prev);
    DeleteSpan(prev);
    span->start -= len;
    span->length += len;
    pagemap_.set(span->start, span);
    Event(span, 'L', len);
  }
  Span* next = GetDescriptor(p+n);
  if (next != NULL && next->free) {
    // Merge next span into this span
    ASSERT(next->start == p+n);
    const Length len = next->length;
    DLL_Remove(next);
    DeleteSpan(next);
    span->length += len;
    pagemap_.set(span->start + span->length - 1, span);
    Event(span, 'R', len);
  }

  Event(span, 'D', span->length);
  span->free = 1;
  if (span->length < kMaxPages) {
    DLL_Prepend(&free_[span->length].normal, span);
  } else {
    DLL_Prepend(&large_.normal, span);
  }
  free_pages_ += n;

  IncrementalScavenge(n);
  ASSERT(Check());
}
void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
  // Fast path; not yet time to release memory
  scavenge_counter_ -= n;
  if (scavenge_counter_ >= 0) return;  // Not yet time to scavenge

  static const size_t kDefaultReleaseDelay = 64;

  // Find index of free list to scavenge
  size_t index = scavenge_index_ + 1;
  for (size_t i = 0; i < kMaxPages+1; i++) {
    if (index > kMaxPages) index = 0;
    SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
    if (!DLL_IsEmpty(&slist->normal)) {
      // Release the last span on the normal portion of this list
      Span* s = slist->normal.prev;
      DLL_Remove(s);
      TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                             static_cast<size_t>(s->length << kPageShift));
      DLL_Prepend(&slist->returned, s);

      scavenge_counter_ = std::max<size_t>(16UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));

      if (index == kMaxPages && !DLL_IsEmpty(&slist->normal))
        scavenge_index_ = index - 1;
      else
        scavenge_index_ = index;
      return;
    }
    index++;
  }

  // Nothing to scavenge, delay for a while
  scavenge_counter_ = kDefaultReleaseDelay;
}
void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
  // Associate span object with all interior pages as well
  ASSERT(!span->free);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start+span->length-1) == span);
  Event(span, 'C', sc);
  span->sizeclass = static_cast<unsigned int>(sc);
  for (Length i = 1; i < span->length-1; i++) {
    pagemap_.set(span->start+i, span);
  }
}
#ifndef WTF_CHANGES
static double PagesToMB(uint64_t pages) {
  return (pages << kPageShift) / 1048576.0;
}

void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
  int nonempty_sizes = 0;
  for (int s = 0; s < kMaxPages; s++) {
    if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
      nonempty_sizes++;
    }
  }
  out->printf("------------------------------------------------\n");
  out->printf("PageHeap: %d sizes; %6.1f MB free\n",
              nonempty_sizes, PagesToMB(free_pages_));
  out->printf("------------------------------------------------\n");
  uint64_t total_normal = 0;
  uint64_t total_returned = 0;
  for (int s = 0; s < kMaxPages; s++) {
    const int n_length = DLL_Length(&free_[s].normal);
    const int r_length = DLL_Length(&free_[s].returned);
    if (n_length + r_length > 0) {
      uint64_t n_pages = s * n_length;
      uint64_t r_pages = s * r_length;
      total_normal += n_pages;
      total_returned += r_pages;
      out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
                  "; unmapped: %6.1f MB; %6.1f MB cum\n",
                  s,
                  (n_length + r_length),
                  PagesToMB(n_pages + r_pages),
                  PagesToMB(total_normal + total_returned),
                  PagesToMB(r_pages),
                  PagesToMB(total_returned));
    }
  }

  uint64_t n_pages = 0;
  uint64_t r_pages = 0;
  int n_spans = 0;
  int r_spans = 0;
  out->printf("Normal large spans:\n");
  for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
    out->printf("   [ %6" PRIuS " pages ] %6.1f MB\n",
                s->length, PagesToMB(s->length));
    n_pages += s->length;
    n_spans++;
  }
  out->printf("Unmapped large spans:\n");
  for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
    out->printf("   [ %6" PRIuS " pages ] %6.1f MB\n",
                s->length, PagesToMB(s->length));
    r_pages += s->length;
    r_spans++;
  }
  total_normal += n_pages;
  total_returned += r_pages;
  out->printf(">255   large * %6u spans ~ %6.1f MB; %6.1f MB cum"
              "; unmapped: %6.1f MB; %6.1f MB cum\n",
              (n_spans + r_spans),
              PagesToMB(n_pages + r_pages),
              PagesToMB(total_normal + total_returned),
              PagesToMB(r_pages),
              PagesToMB(total_returned));
}
#endif
bool TCMalloc_PageHeap::GrowHeap(Length n) {
  ASSERT(kMaxPages >= kMinSystemAlloc);
  if (n > kMaxValidPages) return false;
  Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
  size_t actual_size;
  void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
  if (ptr == NULL) {
    if (n < ask) {
      // Try growing just "n" pages
      ask = n;
      ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
    }
    if (ptr == NULL) return false;
  }
  ask = actual_size >> kPageShift;

  uint64_t old_system_bytes = system_bytes_;
  system_bytes_ += (ask << kPageShift);
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  ASSERT(p > 0);

  // If we have already a lot of pages allocated, just pre allocate a bunch of
  // memory for the page map. This prevents fragmentation by pagemap metadata
  // when a program keeps allocating and freeing large blocks.

  if (old_system_bytes < kPageMapBigAllocationThreshold
      && system_bytes_ >= kPageMapBigAllocationThreshold) {
    pagemap_.PreallocateMoreMemory();
  }

  // Make sure pagemap_ has entries for all of the new pages.
  // Plus ensure one before and one after so coalescing code
  // does not need bounds-checking.
  if (pagemap_.Ensure(p-1, ask+2)) {
    // Pretend the new area is allocated and then Delete() it to
    // cause any necessary coalescing to occur.
    //
    // We do not adjust free_pages_ here since Delete() will do it for us.
    Span* span = NewSpan(p, ask);
    RecordSpan(span);
    Delete(span);
    ASSERT(Check());
    return true;
  } else {
    // We could not allocate memory within "pagemap_"
    // TODO: Once we can return memory to the system, return the new span
    return false;
  }
}
bool TCMalloc_PageHeap::Check() {
  ASSERT(free_[0].normal.next == &free_[0].normal);
  ASSERT(free_[0].returned.next == &free_[0].returned);
  CheckList(&large_.normal, kMaxPages, 1000000000);
  CheckList(&large_.returned, kMaxPages, 1000000000);
  for (Length s = 1; s < kMaxPages; s++) {
    CheckList(&free_[s].normal, s, s);
    CheckList(&free_[s].returned, s, s);
  }
  return true;
}
#if ASSERT_DISABLED
bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) {
  return true;
}
#else
bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
  for (Span* s = list->next; s != list; s = s->next) {
    CHECK_CONDITION(s->free);
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(s->length <= max_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
  }
  return true;
}
#endif
static void ReleaseFreeList(Span* list, Span* returned) {
  // Walk backwards through list so that when we push these
  // spans on the "returned" list, we preserve the order.
  while (!DLL_IsEmpty(list)) {
    Span* s = list->prev;
    DLL_Remove(s);
    DLL_Prepend(returned, s);
    TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                           static_cast<size_t>(s->length << kPageShift));
  }
}

void TCMalloc_PageHeap::ReleaseFreePages() {
  for (Length s = 0; s < kMaxPages; s++) {
    ReleaseFreeList(&free_[s].normal, &free_[s].returned);
  }
  ReleaseFreeList(&large_.normal, &large_.returned);
  ASSERT(Check());
}
//-------------------------------------------------------------------
// Free list
//-------------------------------------------------------------------

class TCMalloc_ThreadCache_FreeList {
 private:
  void*    list_;       // Linked list of nodes
  uint16_t length_;     // Current length
  uint16_t lowater_;    // Low water mark for list length

 public:
  void Init() {
    list_ = NULL;
    length_ = 0;
    lowater_ = 0;
  }

  // Return current length of list
  int length() const {
    return length_;
  }

  // Is list empty?
  bool empty() const {
    return list_ == NULL;
  }

  // Low-water mark management
  int lowwatermark() const { return lowater_; }
  void clear_lowwatermark() { lowater_ = length_; }

  ALWAYS_INLINE void Push(void* ptr) {
    SLL_Push(&list_, ptr);
    length_++;
  }

  void PushRange(int N, void *start, void *end) {
    SLL_PushRange(&list_, start, end);
    length_ = length_ + static_cast<uint16_t>(N);
  }

  void PopRange(int N, void **start, void **end) {
    SLL_PopRange(&list_, N, start, end);
    ASSERT(length_ >= N);
    length_ = length_ - static_cast<uint16_t>(N);
    if (length_ < lowater_) lowater_ = length_;
  }

  ALWAYS_INLINE void* Pop() {
    ASSERT(list_ != NULL);
    length_--;
    if (length_ < lowater_) lowater_ = length_;
    return SLL_Pop(&list_);
  }

#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
  {
      for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
          finder.visit(nextObject);
  }
#endif
};
//-------------------------------------------------------------------
// Data kept per thread
//-------------------------------------------------------------------

class TCMalloc_ThreadCache {
 private:
  typedef TCMalloc_ThreadCache_FreeList FreeList;
#if COMPILER(MSVC)
  typedef DWORD ThreadIdentifier;
#else
  typedef pthread_t ThreadIdentifier;
#endif

  size_t        size_;                  // Combined size of data
  ThreadIdentifier tid_;                // Which thread owns it
  bool          in_setspecific_;        // Called pthread_setspecific?
  FreeList      list_[kNumClasses];     // Array indexed by size-class

  // We sample allocations, biased by the size of the allocation
  uint32_t      rnd_;                   // Cheap random number generator
  size_t        bytes_until_sample_;    // Bytes until we sample next

  // Allocate a new heap. REQUIRES: pageheap_lock is held.
  static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);

  // Use only as pthread thread-specific destructor function.
  static void DestroyThreadCache(void* ptr);

 public:
  // All ThreadCache objects are kept in a linked list (for stats collection)
  TCMalloc_ThreadCache* next_;
  TCMalloc_ThreadCache* prev_;

  void Init(ThreadIdentifier tid);
  void Cleanup();

  // Accessors (mostly just for printing stats)
  int freelist_length(size_t cl) const { return list_[cl].length(); }

  // Total byte size in cache
  size_t Size() const { return size_; }

  void* Allocate(size_t size);
  void Deallocate(void* ptr, size_t size_class);

  void FetchFromCentralCache(size_t cl, size_t allocationSize);
  void ReleaseToCentralCache(size_t cl, int N);
  void Scavenge();
  void Print() const;

  // Record allocation of "k" bytes.  Return true iff allocation
  // should be sampled
  bool SampleAllocation(size_t k);

  // Pick next sampling point
  void PickNextSample(size_t k);

  static void                  InitModule();
  static void                  InitTSD();
  static TCMalloc_ThreadCache* GetThreadHeap();
  static TCMalloc_ThreadCache* GetCache();
  static TCMalloc_ThreadCache* GetCacheIfPresent();
  static TCMalloc_ThreadCache* CreateCacheIfNecessary();
  static void                  DeleteCache(TCMalloc_ThreadCache* heap);
  static void                  BecomeIdle();
  static void                  RecomputeThreadCacheSize();

#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
  {
      for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
          list_[sizeClass].enumerateFreeObjects(finder, reader);
  }
#endif
};
//-------------------------------------------------------------------
// Data kept per size-class in central cache
//-------------------------------------------------------------------

class TCMalloc_Central_FreeList {
 public:
  void Init(size_t cl);

  // These methods all do internal locking.

  // Insert the specified range into the central freelist.  N is the number of
  // elements in the range.
  void InsertRange(void *start, void *end, int N);

  // Returns the actual number of fetched elements into N.
  void RemoveRange(void **start, void **end, int *N);

  // Returns the number of free objects in cache.
  size_t length() {
    SpinLockHolder h(&lock_);
    return counter_;
  }

  // Returns the number of free objects in the transfer cache.
  int tc_length() {
    SpinLockHolder h(&lock_);
    return used_slots_ * num_objects_to_move[size_class_];
  }

#ifdef WTF_CHANGES
  template <class Finder, class Reader>
  void enumerateFreeObjects(Finder& finder, const Reader& reader)
  {
    for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0))
      ASSERT(!span->objects);

    ASSERT(!nonempty_.objects);
    for (Span* span = reader(nonempty_.next); span && span != &nonempty_; span = (span->next ? reader(span->next) : 0)) {
      for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
        finder.visit(nextObject);
    }
  }
#endif

 private:
  // REQUIRES: lock_ is held
  // Remove object from cache and return.
  // Return NULL if no free entries in cache.
  void* FetchFromSpans();

  // REQUIRES: lock_ is held
  // Remove object from cache and return.  Fetches
  // from pageheap if cache is empty.  Only returns
  // NULL on allocation failure.
  void* FetchFromSpansSafe();

  // REQUIRES: lock_ is held
  // Release a linked list of objects to spans.
  // May temporarily release lock_.
  void ReleaseListToSpans(void *start);

  // REQUIRES: lock_ is held
  // Release an object to spans.
  // May temporarily release lock_.
  void ReleaseToSpans(void* object);

  // REQUIRES: lock_ is held
  // Populate cache by fetching from the page heap.
  // May temporarily release lock_.
  void Populate();

  // REQUIRES: lock is held.
  // Tries to make room for a TCEntry.  If the cache is full it will try to
  // expand it at the cost of some other cache size.  Return false if there is
  // no space.
  bool MakeCacheSpace();

  // REQUIRES: lock_ for locked_size_class is held.
  // Picks a "random" size class to steal a TCEntry slot from.  In reality it
  // just iterates over the sizeclasses but does so without taking a lock.
  // Returns true on success.
  // May temporarily lock a "random" size class.
  static bool EvictRandomSizeClass(size_t locked_size_class, bool force);

  // REQUIRES: lock_ is *not* held.
  // Tries to shrink the Cache.  If force is true it will release objects to
  // spans if it allows it to shrink the cache.  Return false if it failed to
  // shrink the cache.  Decrements cache_size_ on success.
  // May temporarily take lock_.  If it takes lock_, the locked_size_class
  // lock is released to keep the thread from holding two size class locks
  // concurrently, which could lead to a deadlock.
  bool ShrinkCache(int locked_size_class, bool force);

  // This lock protects all the data members.  cached_entries and cache_size_
  // may be looked at without holding the lock.
  SpinLock lock_;

  // We keep linked lists of empty and non-empty spans.
  size_t   size_class_;     // My size class
  Span     empty_;          // Dummy header for list of empty spans
  Span     nonempty_;       // Dummy header for list of non-empty spans
  size_t   counter_;        // Number of free objects in cache entry

  // Here we reserve space for TCEntry cache slots.  Since one size class can
  // end up getting all the TCEntries quota in the system we just preallocate
  // a sufficient number of entries here.
  TCEntry tc_slots_[kNumTransferEntries];

  // Number of currently used cached entries in tc_slots_.  This variable is
  // updated under a lock but can be read without one.
  int32_t used_slots_;
  // The current number of slots for this size class.  This is an
  // adaptive value that is increased if there is lots of traffic
  // on a given size class.
  int32_t cache_size_;
};

// Pad each CentralCache object to multiple of 64 bytes
class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
 private:
  char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
};
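// The pad_ computation rounds the object size up to the next multiple of 64
// (a typical cache-line size): e.g. if sizeof(TCMalloc_Central_FreeList) were
// 200, pad_ would be (64 - 200 % 64) % 64 = 56 bytes, for 256 total.  The
// outer "% 64" makes the pad zero when the size is already a multiple of 64,
// so adjacent per-class locks never share a cache line.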
1813 //-------------------------------------------------------------------
1815 //-------------------------------------------------------------------
1817 // Central cache -- a collection of free-lists, one per size-class.
1818 // We have a separate lock per free-list to reduce contention.
1819 static TCMalloc_Central_FreeListPadded central_cache
[kNumClasses
];
1821 // Page-level allocator
1822 static SpinLock pageheap_lock
= SPINLOCK_INITIALIZER
;
1825 static void* pageheap_memory
[(sizeof(TCMalloc_PageHeap
) + sizeof(void*) - 1) / sizeof(void*)] __attribute__((aligned
));
1827 static void* pageheap_memory
[(sizeof(TCMalloc_PageHeap
) + sizeof(void*) - 1) / sizeof(void*)];
1829 static bool phinited
= false;
1831 // Avoid extra level of indirection by making "pageheap" be just an alias
1832 // of pageheap_memory.
1835 TCMalloc_PageHeap
* m_pageHeap
;
1838 static inline TCMalloc_PageHeap
* getPageHeap()
1840 PageHeapUnion u
= { &pageheap_memory
[0] };
1841 return u
.m_pageHeap
;
1844 #define pageheap getPageHeap()
1846 // If TLS is available, we also store a copy
1847 // of the per-thread object in a __thread variable
1848 // since __thread variables are faster to read
1849 // than pthread_getspecific(). We still need
1850 // pthread_setspecific() because __thread
1851 // variables provide no way to run cleanup
1852 // code when a thread is destroyed.
1854 static __thread TCMalloc_ThreadCache
*threadlocal_heap
;
1856 // Thread-specific key. Initialization here is somewhat tricky
1857 // because some Linux startup code invokes malloc() before it
1858 // is in a good enough state to handle pthread_keycreate().
1859 // Therefore, we use TSD keys only after tsd_inited is set to true.
1860 // Until then, we use a slow path to get the heap object.
1861 static bool tsd_inited
= false;
1862 static pthread_key_t heap_key
;
1864 DWORD tlsIndex
= TLS_OUT_OF_INDEXES
;
static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
{
    // Still do pthread_setspecific when using MSVC fast TLS to
    // benefit from the delete callback.
    pthread_setspecific(heap_key, heap);
#if COMPILER(MSVC)
    TlsSetValue(tlsIndex, heap);
#endif
}
// Allocator for thread heaps
static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;

// Linked list of heap objects. Protected by pageheap_lock.
static TCMalloc_ThreadCache* thread_heaps = NULL;
static int thread_heap_count = 0;

// Overall thread cache size. Protected by pageheap_lock.
static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;

// Global per-thread cache size. Writes are protected by
// pageheap_lock. Reads are done without any locking, which should be
// fine as long as size_t can be written atomically and we don't place
// invariants between this variable and other pieces of state.
static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
//-------------------------------------------------------------------
// Central cache implementation
//-------------------------------------------------------------------

void TCMalloc_Central_FreeList::Init(size_t cl) {
  lock_.Init();
  size_class_ = cl;
  DLL_Init(&empty_);
  DLL_Init(&nonempty_);
  counter_ = 0;

  cache_size_ = 1;
  used_slots_ = 0;
  ASSERT(cache_size_ <= kNumTransferEntries);
}
void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
  while (start) {
    void *next = SLL_Next(start);
    ReleaseToSpans(start);
    start = next;
  }
}
ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);
  ASSERT(span != NULL);
  ASSERT(span->refcount > 0);

  // If span is empty, move it to non-empty list
  if (span->objects == NULL) {
    DLL_Remove(span);
    DLL_Prepend(&nonempty_, span);
    Event(span, 'N', 0);
  }

  // The following check is expensive, so it is disabled by default
  if (false) {
    // Check that object does not occur in list
    int got = 0;
    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
      ASSERT(p != object);
      got++;
    }
    ASSERT(got + span->refcount ==
           (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
  }

  counter_++;
  span->refcount--;
  if (span->refcount == 0) {
    Event(span, '#', 0);
    counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
    DLL_Remove(span);

    // Release central list lock while operating on pageheap
    lock_.Unlock();
    {
      SpinLockHolder h(&pageheap_lock);
      pageheap->Delete(span);
    }
    lock_.Lock();
  } else {
    *(reinterpret_cast<void**>(object)) = span->objects;
    span->objects = object;
  }
}
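// Illustrative sketch, not part of the original source: any interior pointer
// of an object maps back to its Span through the page number alone, which is
// why ReleaseToSpans only needs "ptr >> kPageShift". With 4K pages (a shift
// of 12, used here purely as an example value), every address in
// 0x5000..0x5FFF maps to PageID 5. (Wrapped in #if 0 so it stays out of the
// build; extract to run.)
#if 0
#include <cstdio>
#include <stdint.h>

int main() {
    const int kShift = 12;  // example: 4 KB pages
    uintptr_t addrs[] = { 0x5000, 0x57A8, 0x5FFF };
    for (int i = 0; i < 3; i++)
        printf("%lu ", (unsigned long)(addrs[i] >> kShift));  // prints "5 5 5"
    printf("\n");
    return 0;
}
#endif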
ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
    size_t locked_size_class, bool force) {
  static int race_counter = 0;
  int t = race_counter++;  // Updated without a lock, but who cares.
  if (t >= static_cast<int>(kNumClasses)) {
    while (t >= static_cast<int>(kNumClasses)) {
      t -= kNumClasses;
    }
    race_counter = t;
  }
  ASSERT(t >= 0);
  ASSERT(t < static_cast<int>(kNumClasses));
  if (t == static_cast<int>(locked_size_class)) return false;
  return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
}
bool TCMalloc_Central_FreeList::MakeCacheSpace() {
  // Is there room in the cache?
  if (used_slots_ < cache_size_) return true;
  // Check if we can expand this cache?
  if (cache_size_ == kNumTransferEntries) return false;
  // Ok, we'll try to grab an entry from some other size class.
  if (EvictRandomSizeClass(size_class_, false) ||
      EvictRandomSizeClass(size_class_, true)) {
    // Succeeded in evicting, we're going to make our cache larger.
    cache_size_++;
    return true;
  }
  return false;
}
class LockInverter {
 private:
  SpinLock *held_, *temp_;
 public:
  inline explicit LockInverter(SpinLock* held, SpinLock *temp)
    : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
  inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
};
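// Illustrative sketch, not part of the original source: LockInverter is RAII
// in reverse. On construction it releases the lock the caller already holds
// and takes the other one; on destruction it restores the original state. So
// at most one size-class lock is held at any instant, and no lock-order cycle
// between two central free-lists can form. The Inverter/SpinLikeLock names
// below are hypothetical stand-ins built on std::mutex. (Wrapped in #if 0 so
// it stays out of the build; extract to run.)
#if 0
#include <mutex>

struct SpinLikeLock {  // hypothetical stand-in for SpinLock
    std::mutex m;
    void Lock() { m.lock(); }
    void Unlock() { m.unlock(); }
};

class Inverter {
    SpinLikeLock *held_, *temp_;
public:
    Inverter(SpinLikeLock* held, SpinLikeLock* temp) : held_(held), temp_(temp) {
        held_->Unlock();   // drop the caller's lock first...
        temp_->Lock();     // ...then take the other lock
    }
    ~Inverter() {
        temp_->Unlock();   // restore the original locking state
        held_->Lock();
    }
};

static SpinLikeLock a, b;

void shrinkOther() {
    a.Lock();
    {
        Inverter inv(&a, &b);  // inside this scope exactly one lock (b) is held
        // ... work on the structure protected by b ...
    }                          // b released, a re-acquired
    a.Unlock();
}

int main() { shrinkOther(); return 0; }
#endif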
bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
  // Start with a quick check without taking a lock.
  if (cache_size_ == 0) return false;
  // We don't evict from a full cache unless we are 'forcing'.
  if (force == false && used_slots_ == cache_size_) return false;

  // Grab lock, but first release the other lock held by this thread. We use
  // the lock inverter to ensure that we never hold two size class locks
  // concurrently. That can create a deadlock because there is no well
  // defined nesting order.
  LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
  ASSERT(used_slots_ <= cache_size_);
  ASSERT(0 <= cache_size_);
  if (cache_size_ == 0) return false;
  if (used_slots_ == cache_size_) {
    if (force == false) return false;
    // ReleaseListToSpans releases the lock, so we have to make all the
    // updates to the central list before calling it.
    cache_size_--;
    used_slots_--;
    ReleaseListToSpans(tc_slots_[used_slots_].head);
    return true;
  }
  cache_size_--;
  return true;
}
void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
  SpinLockHolder h(&lock_);
  if (N == num_objects_to_move[size_class_] &&
      MakeCacheSpace()) {
    int slot = used_slots_++;
    ASSERT(slot >= 0);
    ASSERT(slot < kNumTransferEntries);
    TCEntry *entry = &tc_slots_[slot];
    entry->head = start;
    entry->tail = end;
    return;
  }
  ReleaseListToSpans(start);
}
void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
  int num = *N;
  ASSERT(num > 0);

  SpinLockHolder h(&lock_);
  if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
    int slot = --used_slots_;
    ASSERT(slot >= 0);
    TCEntry *entry = &tc_slots_[slot];
    *start = entry->head;
    *end = entry->tail;
    return;
  }

  // TODO: Prefetch multiple TCEntries?
  void *tail = FetchFromSpansSafe();
  if (!tail) {
    // We are completely out of memory.
    *start = *end = NULL;
    *N = 0;
    return;
  }

  SLL_SetNext(tail, NULL);
  void *head = tail;
  int count = 1;
  while (count < num) {
    void *t = FetchFromSpans();
    if (!t) break;
    SLL_Push(&head, t);
    count++;
  }
  *start = head;
  *end = tail;
  *N = count;
}
void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
  void *t = FetchFromSpans();
  if (!t) {
    Populate();
    t = FetchFromSpans();
  }
  return t;
}
void* TCMalloc_Central_FreeList::FetchFromSpans() {
  if (DLL_IsEmpty(&nonempty_)) return NULL;
  Span* span = nonempty_.next;

  ASSERT(span->objects != NULL);
  span->refcount++;
  void* result = span->objects;
  span->objects = *(reinterpret_cast<void**>(result));
  if (span->objects == NULL) {
    // Move to empty list
    DLL_Remove(span);
    DLL_Prepend(&empty_, span);
    Event(span, 'E', 0);
  }
  counter_--;
  return result;
}
// Fetch memory from the system and add to the central cache freelist.
ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
  // Release central list lock while operating on pageheap
  lock_.Unlock();
  const size_t npages = class_to_pages[size_class_];

  Span* span;
  {
    SpinLockHolder h(&pageheap_lock);
    span = pageheap->New(npages);
    if (span) pageheap->RegisterSizeClass(span, size_class_);
  }
  if (span == NULL) {
    MESSAGE("allocation failed: %d\n", errno);
    lock_.Lock();
    return;
  }
  ASSERT(span->length == npages);
  // Cache sizeclass info eagerly. Locking is not necessary.
  // (Instead of being eager, we could just replace any stale info
  // about this span, but that seems to be no better in practice.)
  for (size_t i = 0; i < npages; i++) {
    pageheap->CacheSizeClass(span->start + i, size_class_);
  }

  // Split the block into pieces and add to the free-list
  // TODO: coloring of objects to avoid cache conflicts?
  void** tail = &span->objects;
  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
  char* limit = ptr + (npages << kPageShift);
  const size_t size = ByteSizeForClass(size_class_);
  int num = 0;
  char* nptr;
  while ((nptr = ptr + size) <= limit) {
    *tail = ptr;
    tail = reinterpret_cast<void**>(ptr);
    ptr = nptr;
    num++;
  }
  ASSERT(ptr <= limit);
  *tail = NULL;
  span->refcount = 0; // No sub-object in use yet

  // Add span to list of non-empty spans
  lock_.Lock();
  DLL_Prepend(&nonempty_, span);
  counter_ += num;
}
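// Illustrative sketch, not part of the original source: the carving loop in
// Populate() threads a singly linked free list through the span's raw pages;
// free objects need no header because their first word doubles as the "next"
// link. A standalone version over one malloc'd 4K "page" and a 128-byte size
// class, both example values (wrapped in #if 0 so it stays out of the build;
// extract to run):
#if 0
#include <cstdio>
#include <cstdlib>

int main() {
    const size_t size = 128, pageBytes = 4096;
    char* base = static_cast<char*>(malloc(pageBytes));
    char* ptr = base;
    char* limit = ptr + pageBytes;
    void* objects = 0;
    void** tail = &objects;  // tail threads the list through the objects in address order
    int num = 0;
    char* nptr;
    while ((nptr = ptr + size) <= limit) {
        *tail = ptr;                           // link the previous node to this object
        tail = reinterpret_cast<void**>(ptr);  // this object's first word is the next link
        ptr = nptr;
        num++;
    }
    *tail = 0;                                 // terminate the list
    printf("%d\n", num);                       // prints "32" (4096 / 128)
    free(base);
    return 0;
}
#endif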
//-------------------------------------------------------------------
// TCMalloc_ThreadCache implementation
//-------------------------------------------------------------------

inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
  if (bytes_until_sample_ < k) {
    PickNextSample(k);
    return true;
  } else {
    bytes_until_sample_ -= k;
    return false;
  }
}
void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
  size_ = 0;
  next_ = NULL;
  prev_ = NULL;
  tid_  = tid;
  in_setspecific_ = false;
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    list_[cl].Init();
  }

  // Initialize RNG -- run it for a bit to get to good values
  bytes_until_sample_ = 0;
  rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
  for (int i = 0; i < 100; i++) {
    PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
  }
}
void TCMalloc_ThreadCache::Cleanup() {
  // Put unused memory back into central cache
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    if (list_[cl].length() > 0) {
      ReleaseToCentralCache(cl, list_[cl].length());
    }
  }
}
ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
  ASSERT(size <= kMaxSize);
  const size_t cl = SizeClass(size);
  FreeList* list = &list_[cl];
  size_t allocationSize = ByteSizeForClass(cl);
  if (list->empty()) {
    FetchFromCentralCache(cl, allocationSize);
    if (list->empty()) return NULL;
  }
  size_ -= allocationSize;
  return list->Pop();
}
inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
  size_ += ByteSizeForClass(cl);
  FreeList* list = &list_[cl];
  list->Push(ptr);
  // If enough data is free, put back into central cache
  if (list->length() > kMaxFreeListLength) {
    ReleaseToCentralCache(cl, num_objects_to_move[cl]);
  }
  if (size_ >= per_thread_cache_size) Scavenge();
}
// Remove some objects of class "cl" from central cache and add to thread heap
ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
  int fetch_count = num_objects_to_move[cl];
  void *start, *end;
  central_cache[cl].RemoveRange(&start, &end, &fetch_count);
  list_[cl].PushRange(fetch_count, start, end);
  size_ += allocationSize * fetch_count;
}
// Remove some objects of class "cl" from thread heap and add to central cache
inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
  ASSERT(N > 0);
  FreeList* src = &list_[cl];
  if (N > src->length()) N = src->length();
  size_ -= N*ByteSizeForClass(cl);

  // We return prepackaged chains of the correct size to the central cache.
  // TODO: Use the same format internally in the thread caches?
  int batch_size = num_objects_to_move[cl];
  while (N > batch_size) {
    void *tail, *head;
    src->PopRange(batch_size, &head, &tail);
    central_cache[cl].InsertRange(head, tail, batch_size);
    N -= batch_size;
  }
  void *tail, *head;
  src->PopRange(N, &head, &tail);
  central_cache[cl].InsertRange(head, tail, N);
}
// Release idle memory to the central cache
inline void TCMalloc_ThreadCache::Scavenge() {
  // If the low-water mark for the free list is L, it means we would
  // not have had to allocate anything from the central cache even if
  // we had reduced the free list size by L. We aim to get closer to
  // that situation by dropping L/2 nodes from the free list. This
  // may not release much memory, but if so we will call scavenge again
  // pretty soon and the low-water marks will be high on that call.
  //int64 start = CycleClock::Now();

  for (size_t cl = 0; cl < kNumClasses; cl++) {
    FreeList* list = &list_[cl];
    const int lowmark = list->lowwatermark();
    if (lowmark > 0) {
      const int drop = (lowmark > 1) ? lowmark/2 : 1;
      ReleaseToCentralCache(cl, drop);
    }
    list->clear_lowwatermark();
  }

  //int64 finish = CycleClock::Now();
  //CycleTimer ct;
  //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
}
void TCMalloc_ThreadCache::PickNextSample(size_t k) {
  // Make next "random" number
  // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
  static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
  uint32_t r = rnd_;
  rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);

  // Next point is "rnd_ % (sample_period)". I.e., average
  // increment is "sample_period/2".
  const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
  static int last_flag_value = -1;

  if (flag_value != last_flag_value) {
    SpinLockHolder h(&sample_period_lock);
    int i;
    for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
      if (primes_list[i] >= flag_value) {
        break;
      }
    }
    sample_period = primes_list[i];
    last_flag_value = flag_value;
  }

  bytes_until_sample_ += rnd_ % sample_period;

  if (k > (static_cast<size_t>(-1) >> 2)) {
    // If the user has asked for a huge allocation then it is possible
    // for the code below to loop infinitely. Just return (note that
    // this throws off the sampling accuracy somewhat, but a user who
    // is allocating more than 1G of memory at a time can live with a
    // minor inaccuracy in profiling of small allocations, and also
    // would rather not wait for the loop below to terminate).
    return;
  }

  while (bytes_until_sample_ < k) {
    // Increase bytes_until_sample_ by enough average sampling periods
    // (sample_period >> 1) to allow us to sample past the current
    // allocation.
    bytes_until_sample_ += (sample_period >> 1);
  }

  bytes_until_sample_ -= k;
}
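// Illustrative sketch, not part of the original source: the sampler's RNG is
// a 32-bit linear feedback shift register. Each step shifts left and, when
// the bit that falls off the top is 1, XORs in kPoly; the arithmetic shift
// right by 31 expands the top bit into an all-ones or all-zero mask, so no
// branch is needed. (Wrapped in #if 0 so it stays out of the build; extract
// to run.)
#if 0
#include <cstdio>
#include <stdint.h>

int main() {
    const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
    uint32_t r = 0x80000001u;  // top bit set, so the feedback fires on step 1
    for (int i = 0; i < 3; i++) {
        r = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
        printf("%08x\n", r);
    }
    // First step: (0x80000001 << 1) ^ kPoly == 0x00000002 ^ 0x00400007 == 0x00400005.
    return 0;
}
#endif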
void TCMalloc_ThreadCache::InitModule() {
  // There is a slight potential race here because of double-checked
  // locking idiom. However, as long as the program does a small
  // allocation before switching to multi-threaded mode, we will be
  // fine. We increase the chances of doing such a small allocation
  // by doing one in the constructor of the module_enter_exit_hook
  // object declared below.
  SpinLockHolder h(&pageheap_lock);
  if (!phinited) {
#ifdef WTF_CHANGES
    InitTSD();
#endif
    InitSizeClasses();
    threadheap_allocator.Init();
    span_allocator.Init();
    span_allocator.New(); // Reduce cache conflicts
    span_allocator.New(); // Reduce cache conflicts
    stacktrace_allocator.Init();
    DLL_Init(&sampled_objects);
    for (size_t i = 0; i < kNumClasses; ++i) {
      central_cache[i].Init(i);
    }
    new ((void*)pageheap_memory) TCMalloc_PageHeap;
    phinited = 1;
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
    FastMallocZone::init();
#endif
  }
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
  // Create the heap and add it to the linked list
  TCMalloc_ThreadCache *heap = threadheap_allocator.New();
  heap->Init(tid);
  heap->next_ = thread_heaps;
  heap->prev_ = NULL;
  if (thread_heaps != NULL) thread_heaps->prev_ = heap;
  thread_heaps = heap;
  thread_heap_count++;
  RecomputeThreadCacheSize();
  return heap;
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
#ifdef HAVE_TLS
  // __thread is faster, but only when the kernel supports it
  if (KernelSupportsTLS())
    return threadlocal_heap;
#elif COMPILER(MSVC)
  return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
#else
  return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
#endif
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
  TCMalloc_ThreadCache* ptr = NULL;
  if (!tsd_inited) {
    InitModule();
  } else {
    ptr = GetThreadHeap();
  }
  if (ptr == NULL) ptr = CreateCacheIfNecessary();
  return ptr;
}
// In deletion paths, we do not try to create a thread-cache. This is
// because we may be in the thread destruction code and may have
// already cleaned up the cache for this thread.
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
  if (!tsd_inited) return NULL;
  void* const p = GetThreadHeap();
  return reinterpret_cast<TCMalloc_ThreadCache*>(p);
}
void TCMalloc_ThreadCache::InitTSD() {
  ASSERT(!tsd_inited);
  pthread_key_create(&heap_key, DestroyThreadCache);
#if COMPILER(MSVC)
  tlsIndex = TlsAlloc();
#endif
  tsd_inited = true;

#if !COMPILER(MSVC)
  // We may have used a fake pthread_t for the main thread. Fix it.
  pthread_t zero;
  memset(&zero, 0, sizeof(zero));
#endif
#ifndef WTF_CHANGES
  SpinLockHolder h(&pageheap_lock);
#else
  ASSERT(pageheap_lock.IsHeld());
#endif
  for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
    if (h->tid_ == 0) {
      h->tid_ = GetCurrentThreadId();
    }
#else
    if (pthread_equal(h->tid_, zero)) {
      h->tid_ = pthread_self();
    }
#endif
  }
}
TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
  // Initialize per-thread data if necessary
  TCMalloc_ThreadCache* heap = NULL;
  {
    SpinLockHolder h(&pageheap_lock);

#if COMPILER(MSVC)
    DWORD me;
    if (!tsd_inited)
      me = 0;
    else
      me = GetCurrentThreadId();
#else
    // Early on in glibc's life, we cannot even call pthread_self()
    pthread_t me;
    if (!tsd_inited) {
      memset(&me, 0, sizeof(me));
    } else {
      me = pthread_self();
    }
#endif

    // This may be a recursive malloc call from pthread_setspecific()
    // In that case, the heap for this thread has already been created
    // and added to the linked list. So we search for that first.
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
      if (h->tid_ == me) {
#else
      if (pthread_equal(h->tid_, me)) {
#endif
        heap = h;
        break;
      }
    }

    if (heap == NULL) heap = NewHeap(me);
  }

  // We call pthread_setspecific() outside the lock because it may
  // call malloc() recursively. The recursive call will never get
  // here again because it will find the already allocated heap in the
  // linked list of heaps.
  if (!heap->in_setspecific_ && tsd_inited) {
    heap->in_setspecific_ = true;
    setThreadHeap(heap);
  }
  return heap;
}
void TCMalloc_ThreadCache::BecomeIdle() {
  if (!tsd_inited) return;              // No caches yet
  TCMalloc_ThreadCache* heap = GetThreadHeap();
  if (heap == NULL) return;             // No thread cache to remove
  if (heap->in_setspecific_) return;    // Do not disturb the active caller

  heap->in_setspecific_ = true;
  pthread_setspecific(heap_key, NULL);
#ifdef HAVE_TLS
  // Also update the copy in __thread
  threadlocal_heap = NULL;
#endif
  heap->in_setspecific_ = false;
  if (GetThreadHeap() == heap) {
    // Somehow heap got reinstated by a recursive call to malloc
    // from pthread_setspecific. We give up in this case.
    return;
  }

  // We can now get rid of the heap
  DeleteCache(heap);
}
void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
  // Note that "ptr" cannot be NULL since pthread promises not
  // to invoke the destructor on NULL values, but for safety,
  // we check anyway.
  if (ptr == NULL) return;
#ifdef HAVE_TLS
  // Prevent fast path of GetThreadHeap() from returning heap.
  threadlocal_heap = NULL;
#endif
  DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
}
void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
  // Remove all memory from heap
  heap->Cleanup();

  // Remove from linked list
  SpinLockHolder h(&pageheap_lock);
  if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
  if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
  if (thread_heaps == heap) thread_heaps = heap->next_;
  thread_heap_count--;
  RecomputeThreadCacheSize();

  threadheap_allocator.Delete(heap);
}
void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
  // Divide available space across threads
  int n = thread_heap_count > 0 ? thread_heap_count : 1;
  size_t space = overall_thread_cache_size / n;

  // Limit to allowed range
  if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
  if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;

  per_thread_cache_size = space;
}
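// Illustrative sketch, not part of the original source: the per-thread budget
// is simply overall/threads, clamped. The 16MB overall and 512KB..4MB clamp
// values below are made-up stand-ins for the kDefaultOverallThreadCacheSize /
// kMinThreadCacheSize / kMaxThreadCacheSize constants. (Wrapped in #if 0 so
// it stays out of the build; extract to run.)
#if 0
#include <cstdio>

static size_t perThread(size_t overall, int threads, size_t minSz, size_t maxSz) {
    int n = threads > 0 ? threads : 1;
    size_t space = overall / n;
    if (space < minSz) space = minSz;
    if (space > maxSz) space = maxSz;
    return space;
}

int main() {
    const size_t MB = 1 << 20, KB = 1 << 10;
    // 16MB across 8 threads -> 2MB each; across 64 threads -> clamped up to 512KB.
    printf("%zu %zu\n", perThread(16 * MB, 8, 512 * KB, 4 * MB) / MB,
                        perThread(16 * MB, 64, 512 * KB, 4 * MB) / KB);  // "2 512"
    return 0;
}
#endif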
void TCMalloc_ThreadCache::Print() const {
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    MESSAGE("      %5" PRIuS " : %4d len; %4d lo\n",
            ByteSizeForClass(cl),
            list_[cl].length(),
            list_[cl].lowwatermark());
  }
}
// Extract interesting stats
struct TCMallocStats {
  uint64_t system_bytes;        // Bytes allocated from system
  uint64_t thread_bytes;        // Bytes in thread caches
  uint64_t central_bytes;       // Bytes in central cache
  uint64_t transfer_bytes;      // Bytes in central transfer cache
  uint64_t pageheap_bytes;      // Bytes in page heap
  uint64_t metadata_bytes;      // Bytes allocated for metadata
};
// Get stats into "r". Also get per-size-class counts if class_count != NULL
static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
  r->central_bytes = 0;
  r->transfer_bytes = 0;
  for (int cl = 0; cl < kNumClasses; ++cl) {
    const int length = central_cache[cl].length();
    const int tc_length = central_cache[cl].tc_length();
    r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
    r->transfer_bytes +=
      static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
    if (class_count) class_count[cl] = length + tc_length;
  }

  // Add stats from per-thread heaps
  r->thread_bytes = 0;
  { // scope
    SpinLockHolder h(&pageheap_lock);
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
      r->thread_bytes += h->Size();
      if (class_count) {
        for (size_t cl = 0; cl < kNumClasses; ++cl) {
          class_count[cl] += h->freelist_length(cl);
        }
      }
    }
  }

  { // scope
    SpinLockHolder h(&pageheap_lock);
    r->system_bytes = pageheap->SystemBytes();
    r->metadata_bytes = metadata_system_bytes;
    r->pageheap_bytes = pageheap->FreeBytes();
  }
}
// WRITE stats to "out"
static void DumpStats(TCMalloc_Printer* out, int level) {
  TCMallocStats stats;
  uint64_t class_count[kNumClasses];
  ExtractStats(&stats, (level >= 2 ? class_count : NULL));

  if (level >= 2) {
    out->printf("------------------------------------------------\n");
    uint64_t cumulative = 0;
    for (int cl = 0; cl < kNumClasses; ++cl) {
      if (class_count[cl] > 0) {
        uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
        cumulative += class_bytes;
        out->printf("class %3d [ %8" PRIuS " bytes ] : "
                    "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
                    cl, ByteSizeForClass(cl),
                    class_count[cl],
                    class_bytes / 1048576.0,
                    cumulative / 1048576.0);
      }
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->Dump(out);
  }

  const uint64_t bytes_in_use = stats.system_bytes
                                - stats.pageheap_bytes
                                - stats.central_bytes
                                - stats.transfer_bytes
                                - stats.thread_bytes;

  out->printf("------------------------------------------------\n"
              "MALLOC: %12" PRIu64 " Heap size\n"
              "MALLOC: %12" PRIu64 " Bytes in use by application\n"
              "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
              "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
              "MALLOC: %12" PRIu64 " Spans in use\n"
              "MALLOC: %12" PRIu64 " Thread heaps in use\n"
              "MALLOC: %12" PRIu64 " Metadata allocated\n"
              "------------------------------------------------\n",
              stats.system_bytes,
              bytes_in_use,
              stats.pageheap_bytes,
              stats.central_bytes,
              stats.transfer_bytes,
              stats.thread_bytes,
              uint64_t(span_allocator.inuse()),
              uint64_t(threadheap_allocator.inuse()),
              stats.metadata_bytes);
}
static void PrintStats(int level) {
  const int kBufferSize = 16 << 10;
  char* buffer = new char[kBufferSize];
  TCMalloc_Printer printer(buffer, kBufferSize);
  DumpStats(&printer, level);
  write(STDERR_FILENO, buffer, strlen(buffer));
  delete[] buffer;
}
static void** DumpStackTraces() {
  // Count how much space we need
  int needed_slots = 0;
  {
    SpinLockHolder h(&pageheap_lock);
    for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
      StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
      needed_slots += 3 + stack->depth;
    }
    needed_slots += 100;             // Slop in case sample grows
    needed_slots += needed_slots/8;  // An extra 12.5% slop
  }

  void** result = new void*[needed_slots];
  if (result == NULL) {
    MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
            needed_slots);
    return NULL;
  }

  SpinLockHolder h(&pageheap_lock);
  int used_slots = 0;
  for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
    ASSERT(used_slots < needed_slots);  // Need to leave room for terminator
    StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
    if (used_slots + 3 + stack->depth >= needed_slots) {
      // No more room
      break;
    }

    result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
    result[used_slots+1] = reinterpret_cast<void*>(stack->size);
    result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
    for (int d = 0; d < stack->depth; d++) {
      result[used_slots+3+d] = stack->stack[d];
    }
    used_slots += 3 + stack->depth;
  }
  result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
  return result;
}
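// Illustrative sketch, not part of the original source: each sample above is
// packed as [count, size, depth, pc0..pc(depth-1)] and the array ends with a
// slot holding count 0. A reader would walk it like this (walkTraces is a
// hypothetical helper; wrapped in #if 0 so it stays out of the build):
#if 0
#include <cstdio>
#include <stdint.h>

void walkTraces(void** slots) {
    int i = 0;
    while (reinterpret_cast<uintptr_t>(slots[i]) != 0) {  // count 0 == terminator
        uintptr_t size  = reinterpret_cast<uintptr_t>(slots[i + 1]);
        uintptr_t depth = reinterpret_cast<uintptr_t>(slots[i + 2]);
        printf("sample: %lu bytes, %lu frames\n",
               (unsigned long)size, (unsigned long)depth);
        i += 3 + static_cast<int>(depth);                 // skip header + PCs
    }
}
#endif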
// TCMalloc's support for extra malloc interfaces
class TCMallocImplementation : public MallocExtension {
 public:
  virtual void GetStats(char* buffer, int buffer_length) {
    ASSERT(buffer_length > 0);
    TCMalloc_Printer printer(buffer, buffer_length);

    // Print level one stats unless lots of space is available
    if (buffer_length < 10000) {
      DumpStats(&printer, 1);
    } else {
      DumpStats(&printer, 2);
    }
  }

  virtual void** ReadStackTraces() {
    return DumpStackTraces();
  }
  virtual bool GetNumericProperty(const char* name, size_t* value) {
    ASSERT(name != NULL);

    if (strcmp(name, "generic.current_allocated_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes
               - stats.thread_bytes
               - stats.central_bytes
               - stats.pageheap_bytes;
      return true;
    }

    if (strcmp(name, "generic.heap_size") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
      // We assume that bytes in the page heap are not fragmented too
      // badly, and are therefore available for allocation.
      SpinLockHolder l(&pageheap_lock);
      *value = pageheap->FreeBytes();
      return true;
    }

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(&pageheap_lock);
      *value = overall_thread_cache_size;
      return true;
    }

    if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.thread_bytes;
      return true;
    }

    return false;
  }
  virtual bool SetNumericProperty(const char* name, size_t value) {
    ASSERT(name != NULL);

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      // Clip the value to a reasonable range
      if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
      if (value > (1<<30)) value = (1<<30);     // Limit to 1GB

      SpinLockHolder l(&pageheap_lock);
      overall_thread_cache_size = static_cast<size_t>(value);
      TCMalloc_ThreadCache::RecomputeThreadCacheSize();
      return true;
    }

    return false;
  }

  virtual void MarkThreadIdle() {
    TCMalloc_ThreadCache::BecomeIdle();
  }

  virtual void ReleaseFreeMemory() {
    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
  }
};
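// Illustrative sketch, not part of the original source: how a client would
// drive the extension interface defined above, assuming the
// MallocExtension::instance() singleton accessor from tcmalloc's
// malloc_extension.h header. (Wrapped in #if 0 so it stays out of the build;
// extract to run against a tcmalloc-linked binary.)
#if 0
#include <cstdio>
#include <google/malloc_extension.h>

int main() {
    size_t allocated = 0;
    if (MallocExtension::instance()->GetNumericProperty(
            "generic.current_allocated_bytes", &allocated))
        printf("app bytes: %zu\n", allocated);

    // Cap the combined thread caches at 8MB; the setter clips out-of-range values.
    MallocExtension::instance()->SetNumericProperty(
        "tcmalloc.max_total_thread_cache_bytes", 8 << 20);
    return 0;
}
#endif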
// The constructor allocates an object to ensure that initialization
// runs before main(), and therefore we do not have a chance to become
// multi-threaded before initialization. We also create the TSD key
// here. Presumably by the time this constructor runs, glibc is in
// good enough shape to handle pthread_key_create().
//
// The constructor also takes the opportunity to tell STL to use
// tcmalloc. We want to do this early, before constructors run, so
// all user STL allocations go through tcmalloc (which works really
// well for STL).
//
// The destructor prints stats when the program exits.
class TCMallocGuard {
 public:
  TCMallocGuard() {
#ifdef HAVE_TLS    // this is true if the cc/ld/libc combo support TLS
    // Check whether the kernel also supports TLS (needs to happen at runtime)
    CheckIfKernelSupportsTLS();
#endif
#ifdef WIN32                    // patch the windows VirtualAlloc, etc.
    PatchWindowsFunctions();    // defined in windows/patch_functions.cc
#endif
    free(malloc(1));
    TCMalloc_ThreadCache::InitTSD();
    free(malloc(1));
    MallocExtension::Register(new TCMallocImplementation);
  }

  ~TCMallocGuard() {
    const char* env = getenv("MALLOCSTATS");
    if (env != NULL) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);
    }
#ifdef WIN32
    UnpatchWindowsFunctions();
#endif
  }
};
static TCMallocGuard module_enter_exit_hook;
//-------------------------------------------------------------------
// Helpers for the exported routines below
//-------------------------------------------------------------------

static Span* DoSampledAllocation(size_t size) {
  // Grab the stack trace outside the heap lock
  StackTrace tmp;
  tmp.size = size;
  tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);

  SpinLockHolder h(&pageheap_lock);
  // Allocate span
  Span *span = pageheap->New(pages(size == 0 ? 1 : size));
  if (span == NULL) {
    return NULL;
  }

  // Allocate stack trace
  StackTrace *stack = stacktrace_allocator.New();
  if (stack == NULL) {
    // Sampling failed because of lack of memory
    return span;
  }

  *stack = tmp;
  span->sample = 1;
  span->objects = stack;
  DLL_Prepend(&sampled_objects, span);

  return span;
}
static inline bool CheckCachedSizeClass(void *ptr) {
  PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cached_value = pageheap->GetSizeClassIfCached(p);
  return cached_value == 0 ||
      cached_value == pageheap->GetDescriptor(p)->sizeclass;
}

static inline void* CheckedMallocResult(void *result)
{
  ASSERT(result == 0 || CheckCachedSizeClass(result));
  return result;
}

static inline void* SpanToMallocResult(Span *span) {
  pageheap->CacheSizeClass(span->start, 0);
  return
      CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
}
static ALWAYS_INLINE void* do_malloc(size_t size) {
  void* ret = NULL;

#ifdef WTF_CHANGES
  ASSERT(!isForbidden());
#endif

  // The following call forces module initialization
  TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
  if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
    Span* span = DoSampledAllocation(size);
    if (span != NULL) {
      ret = SpanToMallocResult(span);
    }
  } else if (size > kMaxSize) {
    // Use page-level allocator
    SpinLockHolder h(&pageheap_lock);
    Span* span = pageheap->New(pages(size));
    if (span != NULL) {
      ret = SpanToMallocResult(span);
    }
  } else {
    // The common case, and also the simplest. This just pops the
    // size-appropriate freelist, after replenishing it if it's empty.
    ret = CheckedMallocResult(heap->Allocate(size));
  }
  if (ret == NULL) errno = ENOMEM;
  return ret;
}
static ALWAYS_INLINE void do_free(void* ptr) {
  if (ptr == NULL) return;
  ASSERT(pageheap != NULL);  // Should not call free() before malloc()
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  Span* span = NULL;
  size_t cl = pageheap->GetSizeClassIfCached(p);

  if (cl == 0) {
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
  }
  if (cl != 0) {
    ASSERT(!pageheap->GetDescriptor(p)->sample);
    TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
    if (heap != NULL) {
      heap->Deallocate(ptr, cl);
    } else {
      // Delete directly into central cache
      SLL_SetNext(ptr, NULL);
      central_cache[cl].InsertRange(ptr, ptr, 1);
    }
  } else {
    SpinLockHolder h(&pageheap_lock);
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span != NULL && span->start == p);
    if (span->sample) {
      DLL_Remove(span);
      stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
      span->objects = NULL;
    }
    pageheap->Delete(span);
  }
}
// For use by exported routines below that want specific alignments
//
// Note: this code can be slow, and can significantly fragment memory.
// The expectation is that memalign/posix_memalign/valloc/pvalloc will
// not be invoked very often. This requirement simplifies our
// implementation and allows us to tune for expected allocation
// patterns.
static void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  ASSERT(align > 0);
  if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();

  // Allocate at least one byte to avoid boundary conditions below
  if (size == 0) size = 1;

  if (size <= kMaxSize && align < kPageSize) {
    // Search through acceptable size classes looking for one with
    // enough alignment. This depends on the fact that
    // InitSizeClasses() currently produces several size classes that
    // are aligned at powers of two. We will waste time and space if
    // we miss in the size class array, but that is deemed acceptable
    // since memalign() should be used rarely.
    size_t cl = SizeClass(size);
    while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
      cl++;
    }
    if (cl < kNumClasses) {
      TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
      return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
    }
  }

  // We will allocate directly from the page heap
  SpinLockHolder h(&pageheap_lock);

  if (align <= kPageSize) {
    // Any page-level allocation will be fine
    // TODO: We could put the rest of this page in the appropriate
    // TODO: cache but it does not seem worth it.
    Span* span = pageheap->New(pages(size));
    return span == NULL ? NULL : SpanToMallocResult(span);
  }

  // Allocate extra pages and carve off an aligned portion
  const Length alloc = pages(size + align);
  Span* span = pageheap->New(alloc);
  if (span == NULL) return NULL;

  // Skip starting portion so that we end up aligned
  Length skip = 0;
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
    skip++;
  }
  ASSERT(skip < alloc);
  if (skip > 0) {
    Span* rest = pageheap->Split(span, skip);
    pageheap->Delete(span);
    span = rest;
  }

  // Skip trailing portion that we do not need to return
  const Length needed = pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    Span* trailer = pageheap->Split(span, needed);
    pageheap->Delete(trailer);
  }
  return SpanToMallocResult(span);
}
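// Illustrative sketch, not part of the original source: the alignment loop in
// do_memalign just counts how many leading pages to skip so the result lands
// on an align boundary. With 4K pages (example shift of 12), a span starting
// at page 3 (address 0x3000) with a 16K alignment request needs skip == 1,
// giving address 0x4000. (Wrapped in #if 0 so it stays out of the build;
// extract to run.)
#if 0
#include <cstdio>
#include <stdint.h>

int main() {
    const int kShift = 12;    // example: 4 KB pages
    uintptr_t start = 3;      // span starts at page 3 -> address 0x3000
    uintptr_t align = 16384;  // want a 16 KB boundary
    uintptr_t skip = 0;
    while ((((start + skip) << kShift) & (align - 1)) != 0)
        skip++;
    printf("skip=%lu addr=0x%lx\n", (unsigned long)skip,
           (unsigned long)((start + skip) << kShift));  // "skip=1 addr=0x4000"
    return 0;
}
#endif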
// Helpers for use by exported routines below:

static inline void do_malloc_stats() {
  PrintStats(1);
}

static inline int do_mallopt(int, int) {
  return 1;     // Indicates error
}
#ifdef HAVE_STRUCT_MALLINFO  // mallinfo isn't defined on freebsd, for instance
static inline struct mallinfo do_mallinfo() {
  TCMallocStats stats;
  ExtractStats(&stats, NULL);

  // Just some of the fields are filled in.
  struct mallinfo info;
  memset(&info, 0, sizeof(info));

  // Unfortunately, the struct contains "int" fields, so some of the
  // size values will be truncated.
  info.arena     = static_cast<int>(stats.system_bytes);
  info.fsmblks   = static_cast<int>(stats.thread_bytes
                                    + stats.central_bytes
                                    + stats.transfer_bytes);
  info.fordblks  = static_cast<int>(stats.pageheap_bytes);
  info.uordblks  = static_cast<int>(stats.system_bytes
                                    - stats.thread_bytes
                                    - stats.central_bytes
                                    - stats.transfer_bytes
                                    - stats.pageheap_bytes);

  return info;
}
#endif
//-------------------------------------------------------------------
// Exported routines
//-------------------------------------------------------------------

// CAVEAT: The code structure below ensures that MallocHook methods are always
//         called from the stack frame of the invoked allocation function.
//         heap-checker.cc depends on this to start a stack trace from
//         the call to the (de)allocation function.

#ifndef WTF_CHANGES
extern "C"
#endif
void* malloc(size_t size) {
  void* result = do_malloc(size);
#ifndef WTF_CHANGES
  MallocHook::InvokeNewHook(result, size);
#endif
  return result;
}
#ifndef WTF_CHANGES
extern "C"
#endif
void free(void* ptr) {
#ifndef WTF_CHANGES
  MallocHook::InvokeDeleteHook(ptr);
#endif
  do_free(ptr);
}
#ifndef WTF_CHANGES
extern "C"
#endif
void* calloc(size_t n, size_t elem_size) {
  const size_t totalBytes = n * elem_size;

  // Protect against overflow
  if (n > 1 && elem_size && (totalBytes / elem_size) != n)
    return 0;

  void* result = do_malloc(totalBytes);
  if (result != NULL) {
    memset(result, 0, totalBytes);
  }
#ifndef WTF_CHANGES
  MallocHook::InvokeNewHook(result, totalBytes);
#endif
  return result;
}
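// Illustrative sketch, not part of the original source: why calloc divides
// back. n * elem_size wraps modulo 2^N, so the product can come out small
// even though the request was huge; dividing the product back by elem_size
// detects the wrap. (Wrapped in #if 0 so it stays out of the build; extract
// to run.)
#if 0
#include <cstdio>

int main() {
    size_t n = (size_t)-1 / 8 + 2;  // chosen so that n * 16 wraps around
    size_t elem = 16;
    size_t total = n * elem;        // wrapped: far smaller than intended
    printf("%d\n", (n > 1 && elem && total / elem != n) ? 1 : 0);  // prints "1"
    return 0;
}
#endif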
#ifndef WTF_CHANGES
extern "C"
#endif
void cfree(void* ptr) {
#ifndef WTF_CHANGES
  MallocHook::InvokeDeleteHook(ptr);
#endif
  do_free(ptr);
}
#ifndef WTF_CHANGES
extern "C"
#endif
void* realloc(void* old_ptr, size_t new_size) {
  if (old_ptr == NULL) {
    void* result = do_malloc(new_size);
#ifndef WTF_CHANGES
    MallocHook::InvokeNewHook(result, new_size);
#endif
    return result;
  }
  if (new_size == 0) {
#ifndef WTF_CHANGES
    MallocHook::InvokeDeleteHook(old_ptr);
#endif
    free(old_ptr);
    return NULL;
  }

  // Get the size of the old entry
  const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
  size_t cl = pageheap->GetSizeClassIfCached(p);
  Span *span = NULL;
  size_t old_size;
  if (cl == 0) {
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
  }
  if (cl != 0) {
    old_size = ByteSizeForClass(cl);
  } else {
    ASSERT(span != NULL);
    old_size = span->length << kPageShift;
  }

  // Reallocate if the new size is larger than the old size,
  // or if the new size is significantly smaller than the old size.
  if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
    // Need to reallocate
    void* new_ptr = do_malloc(new_size);
    if (new_ptr == NULL) {
      return NULL;
    }
#ifndef WTF_CHANGES
    MallocHook::InvokeNewHook(new_ptr, new_size);
#endif
    memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
#ifndef WTF_CHANGES
    MallocHook::InvokeDeleteHook(old_ptr);
#endif
    // We could use a variant of do_free() that leverages the fact
    // that we already know the sizeclass of old_ptr. The benefit
    // would be small, so don't bother.
    do_free(old_ptr);
    return new_ptr;
  } else {
    return old_ptr;
  }
}
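// Illustrative sketch, not part of the original source: the realloc policy
// above keeps the block in place whenever the new request still rounds up to
// at least the old allocated size, and only moves on growth or on a shrink
// that crosses into a smaller size class. roundedSize below is a made-up
// 16-byte-granularity stand-in for AllocationSize, and 48 is an example old
// size. (Wrapped in #if 0 so it stays out of the build; extract to run.)
#if 0
#include <cstdio>

static size_t roundedSize(size_t s) { return (s + 15) & ~(size_t)15; }

int main() {
    size_t old_size = 48;
    size_t requests[] = { 40, 33, 20, 64 };
    for (int i = 0; i < 4; i++) {
        bool move = (requests[i] > old_size) || (roundedSize(requests[i]) < old_size);
        printf("realloc to %zu: %s\n", requests[i], move ? "move" : "in place");
    }
    // 40 -> in place (rounds to 48), 33 -> in place (48),
    // 20 -> move (32 < 48), 64 -> move (growth).
    return 0;
}
#endif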
static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;
static inline void* cpp_alloc(size_t size, bool nothrow) {
  for (;;) {
    void* p = do_malloc(size);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler. NB: this function is not
      // thread-safe. We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
      // If no new_handler is established, the allocation failed.
      if (!nh) {
        if (nothrow) return 0;
        throw std::bad_alloc();
      }
      // Otherwise, try the new_handler. If it returns, retry the
      // allocation. If it throws std::bad_alloc, fail the allocation.
      // If it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
        return p;
      }
    } else {  // allocation success
      return p;
    }
#endif
  }
}
void* operator new(size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}
void* operator new(size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}
void operator delete(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

void operator delete(void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
void* operator new[](size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void* operator new[](size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void operator delete[](void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

void operator delete[](void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
extern "C" void* memalign(size_t align, size_t size) __THROW {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
    __THROW {
  if (((align % sizeof(void*)) != 0) ||
      ((align & (align - 1)) != 0) ||
      (align == 0)) {
    return EINVAL;
  }

  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  if (result == NULL) {
    return ENOMEM;
  } else {
    *result_ptr = result;
    return 0;
  }
}
static size_t pagesize = 0;

extern "C" void* valloc(size_t size) __THROW {
  // Allocate page-aligned object of length >= size bytes
  if (pagesize == 0) pagesize = getpagesize();
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
extern "C" void* pvalloc(size_t size) __THROW {
  // Round up size to a multiple of pagesize
  if (pagesize == 0) pagesize = getpagesize();
  size = (size + pagesize - 1) & ~(pagesize - 1);
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
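// Illustrative sketch, not part of the original source: the mask expression
// in pvalloc rounds up to the next multiple of the (power-of-two) page size.
// Adding pagesize-1 and then clearing the low bits is equivalent to
// ceil(size / pagesize) * pagesize. (Wrapped in #if 0 so it stays out of the
// build; extract to run.)
#if 0
#include <cstdio>

int main() {
    size_t pagesize = 4096;
    size_t sizes[] = { 1, 4096, 4097 };
    for (int i = 0; i < 3; i++)
        printf("%zu ", (sizes[i] + pagesize - 1) & ~(pagesize - 1));
    printf("\n");  // prints "4096 4096 8192"
    return 0;
}
#endif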
extern "C" void malloc_stats(void) {
  do_malloc_stats();
}

extern "C" int mallopt(int cmd, int value) {
  return do_mallopt(cmd, value);
}

#ifdef HAVE_STRUCT_MALLINFO
extern "C" struct mallinfo mallinfo(void) {
  return do_mallinfo();
}
#endif
//-------------------------------------------------------------------
// Some library routines on RedHat 9 allocate memory using malloc()
// and free it using __libc_free() (or vice-versa). Since we provide
// our own implementations of malloc/free, we need to make sure that
// the __libc_XXX variants (defined as part of glibc) also point to
// the same implementations.
//-------------------------------------------------------------------

#if defined(__GLIBC__)
extern "C" {
# if defined(__GNUC__) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
  // Potentially faster variants that use the gcc alias extension.
  // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
# define ALIAS(x) __attribute__ ((weak, alias (x)))
  void* __libc_malloc(size_t size)              ALIAS("malloc");
  void  __libc_free(void* ptr)                  ALIAS("free");
  void* __libc_realloc(void* ptr, size_t size)  ALIAS("realloc");
  void* __libc_calloc(size_t n, size_t size)    ALIAS("calloc");
  void  __libc_cfree(void* ptr)                 ALIAS("cfree");
  void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
  void* __libc_valloc(size_t size)              ALIAS("valloc");
  void* __libc_pvalloc(size_t size)             ALIAS("pvalloc");
  int   __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
# undef ALIAS
# else   /* not __GNUC__ */
  // Portable wrappers
  void* __libc_malloc(size_t size)              { return malloc(size);       }
  void  __libc_free(void* ptr)                  { free(ptr);                 }
  void* __libc_realloc(void* ptr, size_t size)  { return realloc(ptr, size); }
  void* __libc_calloc(size_t n, size_t size)    { return calloc(n, size);    }
  void  __libc_cfree(void* ptr)                 { cfree(ptr);                }
  void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
  void* __libc_valloc(size_t size)              { return valloc(size);       }
  void* __libc_pvalloc(size_t size)             { return pvalloc(size);      }
  int   __posix_memalign(void** r, size_t a, size_t s) {
    return posix_memalign(r, a, s);
  }
# endif  /* __GNUC__ */
}   // extern "C"
#endif   /* __GLIBC__ */
// Override __libc_memalign in libc on linux boxes specially.
// They have a bug in libc that causes them to (very rarely) allocate
// with __libc_memalign() yet deallocate with free() and the
// definitions above don't catch it.
// This function is an exception to the rule of calling MallocHook method
// from the stack frame of the allocation function;
// heap-checker handles this special case explicitly.
static void *MemalignOverride(size_t align, size_t size, const void *caller)
    __THROW {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
#include <wtf/HashSet.h>

class FreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    HashSet<void*> m_freeObjects;

public:
    FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }

    void visit(void* ptr) { m_freeObjects.add(ptr); }
    bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
    size_t freeObjectCount() const { return m_freeObjects.size(); }

    void findFreeObjects(TCMalloc_ThreadCache* threadCache)
    {
        for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
            threadCache->enumerateFreeObjects(*this, m_reader);
    }

    void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes)
    {
        for (unsigned i = 0; i < numSizes; i++)
            centralFreeList[i].enumerateFreeObjects(*this, m_reader);
    }
};
class PageMapFreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    FreeObjectFinder& m_freeObjectFinder;

public:
    PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
        : m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    int visit(void* ptr) const
    {
        if (!ptr)
            return 1;

        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (span->free) {
            void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
            m_freeObjectFinder.visit(ptr);
        } else if (span->sizeclass) {
            // Walk the free list of the small-object span, keeping track of each object seen
            for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject)))
                m_freeObjectFinder.visit(nextObject);
        }
        return span->length;
    }
};
class PageMapMemoryUsageRecorder {
    task_t m_task;
    void* m_context;
    unsigned m_typeMask;
    vm_range_recorder_t* m_recorder;
    const RemoteMemoryReader& m_reader;
    const FreeObjectFinder& m_freeObjectFinder;
    mutable HashSet<void*> m_seenPointers;

public:
    PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
        : m_task(task)
        , m_context(context)
        , m_typeMask(typeMask)
        , m_recorder(recorder)
        , m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    int visit(void* ptr) const
    {
        if (!ptr)
            return 1;

        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (m_seenPointers.contains(ptr))
            return span->length;
        m_seenPointers.add(ptr);

        // Mark the memory used for the Span itself as an administrative region
        vm_range_t ptrRange = { reinterpret_cast<vm_address_t>(ptr), sizeof(Span) };
        if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
            (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, &ptrRange, 1);

        ptrRange.address = span->start << kPageShift;
        ptrRange.size = span->length * kPageSize;

        // Mark the memory region the span represents as candidates for containing pointers
        if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
            (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);

        if (!span->free && (m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
            // If it's an allocated large object span, mark it as in use
            if (span->sizeclass == 0 && !m_freeObjectFinder.isFreeObject(reinterpret_cast<void*>(ptrRange.address)))
                (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &ptrRange, 1);
            else if (span->sizeclass) {
                const size_t byteSize = ByteSizeForClass(span->sizeclass);
                unsigned totalObjects = (span->length << kPageShift) / byteSize;
                ASSERT(span->refcount <= totalObjects);
                char* ptr = reinterpret_cast<char*>(span->start << kPageShift);

                // Mark each allocated small object within the span as in use
                for (unsigned i = 0; i < totalObjects; i++) {
                    char* thisObject = ptr + (i * byteSize);
                    if (m_freeObjectFinder.isFreeObject(thisObject))
                        continue;

                    vm_range_t objectRange = { reinterpret_cast<vm_address_t>(thisObject), byteSize };
                    (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &objectRange, 1);
                }
            }
        }

        return span->length;
    }
};
kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
{
    RemoteMemoryReader memoryReader(task, reader);

    InitSizeClasses();

    FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
    TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
    TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
    TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);

    TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);

    FreeObjectFinder finder(memoryReader);
    finder.findFreeObjects(threadHeaps);
    finder.findFreeObjects(centralCaches, kNumClasses);

    TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
    PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
    pageMap->visit(pageMapFinder, memoryReader);

    PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
    pageMap->visit(usageRecorder, memoryReader);

    return 0;
}
size_t FastMallocZone::size(malloc_zone_t*, const void*)
{
    return 0;
}

void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
{
    return 0;
}

void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
{
    return 0;
}

void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
{
    // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
    // is not in this zone. When this happens, the pointer being freed was not allocated by any
    // zone so we need to print a useful error for the application developer.
    malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
}

void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
{
    return 0;
}
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
    &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics };
FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches)
    : m_pageHeap(pageHeap)
    , m_threadHeaps(threadHeaps)
    , m_centralCaches(centralCaches)
{
    memset(&m_zone, 0, sizeof(m_zone));
    m_zone.zone_name = "JavaScriptCore FastMalloc";
    m_zone.size = &FastMallocZone::size;
    m_zone.malloc = &FastMallocZone::zoneMalloc;
    m_zone.calloc = &FastMallocZone::zoneCalloc;
    m_zone.realloc = &FastMallocZone::zoneRealloc;
    m_zone.free = &FastMallocZone::zoneFree;
    m_zone.valloc = &FastMallocZone::zoneValloc;
    m_zone.destroy = &FastMallocZone::zoneDestroy;
    m_zone.introspect = &jscore_fastmalloc_introspection;
    malloc_zone_register(&m_zone);
}
void FastMallocZone::init()
{
    static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache));
}

#endif // defined(WTF_CHANGES) && PLATFORM(DARWIN)

void releaseFastMallocFreeMemory()
{
    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
}

} // namespace WTF

#endif // FORCE_SYSTEM_MALLOC