// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: Sanjay Ghemawat <opensource@google.com>
//
// A malloc that uses a per-thread cache to satisfy small malloc requests.
// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
//
// See doc/tcmalloc.html for a high-level
// description of how this malloc works.
//
// SYNCHRONIZATION
//  1. The thread-specific lists are accessed without acquiring any locks.
//     This is safe because each such list is only accessed by one thread.
//  2. We have a lock per central free-list, and hold it while manipulating
//     the central free list for a particular size.
//  3. The central page allocator is protected by "pageheap_lock".
//  4. The pagemap (which maps from page-number to descriptor),
//     can be read without holding any locks, and written while holding
//     the "pageheap_lock".
//  5. To improve performance, a subset of the information one can get
//     from the pagemap is cached in a data structure, pagemap_cache_,
//     that atomically reads and writes its entries.  This cache can be
//     read and written without locking.
//
//     This multi-threaded access to the pagemap is safe for fairly
//     subtle reasons.  We basically assume that when an object X is
//     allocated by thread A and deallocated by thread B, there must
//     have been appropriate synchronization in the handoff of object
//     X from thread A to thread B.  The same logic applies to pagemap_cache_.
// THE PAGEID-TO-SIZECLASS CACHE
// Hot PageID-to-sizeclass mappings are held by pagemap_cache_.  If this cache
// returns 0 for a particular PageID then that means "no information," not that
// the sizeclass is 0.  The cache may have stale information for pages that do
// not hold the beginning of any free()'able object.  Staleness is eliminated
// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
// do_memalign() for all other relevant pages.
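//
// (Illustrative note, not part of the original comment: a zero result from
// pagemap_cache_ therefore has to be treated as "unknown" rather than as a
// real size class. A lookup typically falls back to the pagemap, roughly:
//     size_t cl = pageheap->GetSizeClassIfCached(p);
//     if (cl == 0) cl = pageheap->GetDescriptor(p)->sizeclass;
// where GetDescriptor() consults the full pagemap under the rules above.)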
//
// TODO: Bias reclamation to larger addresses
// TODO: implement mallinfo/mallopt
// TODO: Better testing
//
// 9/28/2003 (new page-level allocator replaces ptmalloc2):
// * malloc/free of small objects goes from ~300 ns to ~50 ns.
// * allocation of a reasonably complicated struct
//   goes from about 1100 ns to about 300 ns.
77 #include "FastMalloc.h"
79 #include "Assertions.h"
80 #if USE(MULTIPLE_THREADS)
84 #ifndef NO_TCMALLOC_SAMPLES
86 #define NO_TCMALLOC_SAMPLES
90 #if !defined(USE_SYSTEM_MALLOC) && defined(NDEBUG)
91 #define FORCE_SYSTEM_MALLOC 0
93 #define FORCE_SYSTEM_MALLOC 1
#if USE(MULTIPLE_THREADS)
static pthread_key_t isForbiddenKey;
static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
static void initializeIsForbiddenKey()
{
    pthread_key_create(&isForbiddenKey, 0);
}

static bool isForbidden()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    return !!pthread_getspecific(isForbiddenKey);
}

void fastMallocForbid()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    pthread_setspecific(isForbiddenKey, &isForbiddenKey);
}

void fastMallocAllow()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    pthread_setspecific(isForbiddenKey, 0);
}

#else

static bool staticIsForbidden;
static bool isForbidden()
{
    return staticIsForbidden;
}

void fastMallocForbid()
{
    staticIsForbidden = true;
}

void fastMallocAllow()
{
    staticIsForbidden = false;
}
#endif // USE(MULTIPLE_THREADS)
void *fastZeroedMalloc(size_t n)
{
    void *result = fastMalloc(n);
    if (!result)
        return 0;
    memset(result, 0, n);
    MallocHook::InvokeNewHook(result, n);
    return result;
}
#if FORCE_SYSTEM_MALLOC

#include <stdlib.h>
#if !PLATFORM(WIN_OS)
    #include <pthread.h>
#endif

void *fastMalloc(size_t n)
{
    ASSERT(!isForbidden());
    return malloc(n);
}

void *fastCalloc(size_t n_elements, size_t element_size)
{
    ASSERT(!isForbidden());
    return calloc(n_elements, element_size);
}

void fastFree(void* p)
{
    ASSERT(!isForbidden());
    free(p);
}

void *fastRealloc(void* p, size_t n)
{
    ASSERT(!isForbidden());
    return realloc(p, n);
}

void releaseFastMallocFreeMemory() { }
// This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
// It will never be used in this case, so its type and value are less interesting than its presence.
extern "C" const int jscore_fastmalloc_introspection = 0;
#if HAVE(STDINT_H)
#include <stdint.h>
#elif HAVE(INTTYPES_H)
#include <inttypes.h>
#else
#include <sys/types.h>
#endif

#include "AlwaysInline.h"
#include "Assertions.h"
#include "TCPackedCache.h"
#include "TCPageMap.h"
#include "TCSpinLock.h"
#include "TCSystemAlloc.h"

#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif

#include "MallocZoneSupport.h"
// Calling pthread_getspecific through a global function pointer is faster than a normal
// call to the function on Mac OS X, and it's used in performance-critical code. So we
// use a function pointer. But that's not necessarily faster on other platforms, and we had
// problems with this technique on Windows, so we'll do this only on Mac OS X.
static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
#define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
#define DEFINE_VARIABLE(type, name, value, meaning) \
  namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead {  \
  type FLAGS_##name(value);                                                     \
  char FLAGS_no##name;                                                          \
  }                                                                             \
  using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name

#define DEFINE_int64(name, value, meaning) \
  DEFINE_VARIABLE(int64_t, name, value, meaning)

#define DEFINE_double(name, value, meaning) \
  DEFINE_VARIABLE(double, name, value, meaning)
#define malloc fastMalloc
#define calloc fastCalloc
#define free fastFree
#define realloc fastRealloc

#define MESSAGE LOG_ERROR
#define CHECK_CONDITION ASSERT
class TCMalloc_PageHeap;
class TCMalloc_ThreadCache;
class TCMalloc_Central_FreeListPadded;

class FastMallocZone {
public:
    static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
    static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
    static boolean_t check(malloc_zone_t*) { return true; }
    static void print(malloc_zone_t*, boolean_t) { }
    static void log(malloc_zone_t*, void*) { }
    static void forceLock(malloc_zone_t*) { }
    static void forceUnlock(malloc_zone_t*) { }
    static void statistics(malloc_zone_t*, malloc_statistics_t*) { }

private:
    FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*);
    static size_t size(malloc_zone_t*, const void*);
    static void* zoneMalloc(malloc_zone_t*, size_t);
    static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
    static void zoneFree(malloc_zone_t*, void*);
    static void* zoneRealloc(malloc_zone_t*, void*, size_t);
    static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
    static void zoneDestroy(malloc_zone_t*) { }

    malloc_zone_t m_zone;
    TCMalloc_PageHeap* m_pageHeap;
    TCMalloc_ThreadCache** m_threadHeaps;
    TCMalloc_Central_FreeListPadded* m_centralCaches;
};
// This #ifdef should almost never be set.  Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip)  (0)
#else
# include <google/stacktrace.h>
#endif
// Even if we have support for thread-local storage in the compiler
// and linker, the OS may not support it.  We need to check that at
// runtime.  Right now, we have to keep a manual set of "bad" OSes.
#if defined(HAVE_TLS)
  static bool kernel_supports_tls = false;      // be conservative
  static inline bool KernelSupportsTLS() {
    return kernel_supports_tls;
  }
# if !HAVE_DECL_UNAME   // if too old for uname, probably too old for TLS
    static void CheckIfKernelSupportsTLS() {
      kernel_supports_tls = false;
    }
# else
#   include <sys/utsname.h>    // DECL_UNAME checked for <sys/utsname.h> too
    static void CheckIfKernelSupportsTLS() {
      struct utsname buf;
      if (uname(&buf) != 0) {   // should be impossible
        MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
        kernel_supports_tls = false;
      } else if (strcasecmp(buf.sysname, "linux") == 0) {
        // The linux case: the first kernel to support TLS was 2.6.0
        if (buf.release[0] < '2' && buf.release[1] == '.')    // 0.x or 1.x
          kernel_supports_tls = false;
        else if (buf.release[0] == '2' && buf.release[1] == '.' &&
                 buf.release[2] >= '0' && buf.release[2] < '6' &&
                 buf.release[3] == '.')                       // 2.0 - 2.5
          kernel_supports_tls = false;
        else
          kernel_supports_tls = true;
      } else {        // some other kernel, we'll be optimistic
        kernel_supports_tls = true;
      }
      // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
    }
# endif  // HAVE_DECL_UNAME
#endif   // HAVE_TLS
// __THROW is defined in glibc systems.  It means, counter-intuitively,
// "This function will never throw an exception."  It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.
#ifndef __THROW    // I guess we're not on a glibc system
# define __THROW   // __THROW is just an optimization, so ok to make it ""
#endif
//-------------------------------------------------------------------
// Configuration
//-------------------------------------------------------------------

// Not all possible combinations of the following parameters make
// sense.  In particular, if kMaxSize increases, you may have to
// increase kNumClasses as well.
static const size_t kPageShift  = 12;
static const size_t kPageSize   = 1 << kPageShift;
static const size_t kMaxSize    = 8u * kPageSize;
static const size_t kAlignShift = 3;
static const size_t kAlignment  = 1 << kAlignShift;
static const size_t kNumClasses = 68;
// Allocates a big block of memory for the pagemap once we reach more than
// 128MB
static const size_t kPageMapBigAllocationThreshold = 128 << 20;

// Minimum number of pages to fetch from system at a time.  Must be
// significantly bigger than kBlockSize to amortize system-call
// overhead, and also to reduce external fragmentation.  Also, we
// should keep this value big because various incarnations of Linux
// have small limits on the number of mmap() regions per
// address-space.
static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
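// (Illustrative arithmetic, not in the original source: with kPageShift = 12,
// i.e. 4K pages, kMinSystemAlloc = 1 << (20 - 12) = 256 pages, so each growth
// of the page heap asks the system for at least 256 * 4KB = 1MB, and
// kPageMapBigAllocationThreshold = 128 << 20 = 128MB.)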
// Number of objects to move between a per-thread list and a central
// list in one shot.  We want this to be not too small so we can
// amortize the lock overhead for accessing the central list.  Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
static int num_objects_to_move[kNumClasses];

// Maximum length we allow a per-thread free-list to have before we
// move objects from it into the corresponding central free-list.  We
// want this big to avoid locking the central free-list too often.  It
// should not hurt to make this list somewhat big because the
// scavenging code will shrink it down when its contents are not in use.
static const int kMaxFreeListLength = 256;

// Lower and upper bounds on the per-thread cache sizes
static const size_t kMinThreadCacheSize = kMaxSize * 2;
static const size_t kMaxThreadCacheSize = 2 << 20;

// Default bound on the total amount of thread caches
static const size_t kDefaultOverallThreadCacheSize = 16 << 20;

// For all span-lengths < kMaxPages we keep an exact-size list.
// REQUIRED: kMaxPages >= kMinSystemAlloc;
static const size_t kMaxPages = kMinSystemAlloc;
/* The smallest prime > 2^n */
static int primes_list[] = {
    // Small values might cause high rates of sampling
    // and hence commented out.
    // 2, 5, 11, 17, 37, 67, 131, 257,
    // 521, 1031, 2053, 4099, 8209, 16411,
    32771, 65537, 131101, 262147, 524309, 1048583,
    2097169, 4194319, 8388617, 16777259, 33554467 };
// Twice the approximate gap between sampling actions.
// I.e., we take one sample approximately once every
//      tcmalloc_sample_parameter/2
// bytes of allocation, i.e., ~ once every 128KB.
// Must be a prime number.
#ifdef NO_TCMALLOC_SAMPLES
DEFINE_int64(tcmalloc_sample_parameter, 0,
             "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
static size_t sample_period = 0;
#else
DEFINE_int64(tcmalloc_sample_parameter, 262147,
             "Twice the approximate gap between sampling actions."
             " Must be a prime number. Otherwise will be rounded up to a "
             " larger prime number");
static size_t sample_period = 262147;
#endif

// Protects sample_period above
static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
// Parameters for controlling how fast memory is returned to the OS.

DEFINE_double(tcmalloc_release_rate, 1,
              "Rate at which we release unused memory to the system.  "
              "Zero means we never release memory back to the system.  "
              "Increase this flag to return memory faster; decrease it "
              "to return memory slower.  Reasonable rates are in the "
              "range [0,10]");
463 //-------------------------------------------------------------------
464 // Mapping from size to size_class and vice versa
465 //-------------------------------------------------------------------
467 // Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
468 // array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
469 // So for these larger sizes we have an array indexed by ceil(size/128).
471 // We flatten both logical arrays into one physical array and use
472 // arithmetic to compute an appropriate index. The constants used by
473 // ClassIndex() were selected to make the flattening work.
476 // Size Expression Index
477 // -------------------------------------------------------
481 // 1024 (1024 + 7) / 8 128
482 // 1025 (1025 + 127 + (120<<7)) / 128 129
484 // 32768 (32768 + 127 + (120<<7)) / 128 376
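//
// Worked example (added for clarity, not in the original source): for a
// small request, say s = 40, we have i = (40 > kMaxSmallSize) = 0, so the
// index is (40 + 7) >> 3 = 5; for s = 1025, i = 1 and the index is
// (1025 + 127 + (120 << 7)) >> 7 = 16512 >> 7 = 129, matching the table above.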
static const size_t kMaxSmallSize = 1024;
static const int shift_amount[2] = { 3, 7 };  // For divides by 8 or 128
static const int add_amount[2] = { 7, 127 + (120 << 7) };
static unsigned char class_array[377];

// Compute index of the class_array[] entry for a given size
static inline int ClassIndex(size_t s) {
  const int i = (s > kMaxSmallSize);
  return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
}

// Mapping from size class to max size storable in that class
static size_t class_to_size[kNumClasses];

// Mapping from size class to number of pages to allocate at a time
static size_t class_to_pages[kNumClasses];
// TransferCache is used to cache transfers of num_objects_to_move[size_class]
// back and forth between thread caches and the central cache for a given size
// class.
struct TCEntry {
  void *head;  // Head of chain of objects.
  void *tail;  // Tail of chain of objects.
};

// A central cache freelist can have anywhere from 0 to kNumTransferEntries
// slots to put linked list chains into.  To keep memory usage bounded the total
// number of TCEntries across size classes is fixed.  Currently each size
// class is initially given one TCEntry which also means that the maximum any
// one class can have is kNumClasses.
static const int kNumTransferEntries = kNumClasses;
516 // Note: the following only works for "n"s that fit in 32-bits, but
517 // that is fine since we only use it for small sizes.
518 static inline int LgFloor(size_t n
) {
520 for (int i
= 4; i
>= 0; --i
) {
521 int shift
= (1 << i
);
522 size_t x
= n
>> shift
;
532 // Some very basic linked list functions for dealing with using void * as
535 static inline void *SLL_Next(void *t
) {
536 return *(reinterpret_cast<void**>(t
));
539 static inline void SLL_SetNext(void *t
, void *n
) {
540 *(reinterpret_cast<void**>(t
)) = n
;
543 static inline void SLL_Push(void **list
, void *element
) {
544 SLL_SetNext(element
, *list
);
548 static inline void *SLL_Pop(void **list
) {
549 void *result
= *list
;
550 *list
= SLL_Next(*list
);
555 // Remove N elements from a linked list to which head points. head will be
556 // modified to point to the new head. start and end will point to the first
557 // and last nodes of the range. Note that end will point to NULL after this
558 // function is called.
559 static inline void SLL_PopRange(void **head
, int N
, void **start
, void **end
) {
567 for (int i
= 1; i
< N
; ++i
) {
573 *head
= SLL_Next(tmp
);
574 // Unlink range from list.
575 SLL_SetNext(tmp
, NULL
);
578 static inline void SLL_PushRange(void **head
, void *start
, void *end
) {
580 SLL_SetNext(end
, *head
);
584 static inline size_t SLL_Size(void *head
) {
588 head
= SLL_Next(head
);
593 // Setup helper functions.
595 static ALWAYS_INLINE
size_t SizeClass(size_t size
) {
596 return class_array
[ClassIndex(size
)];
599 // Get the byte-size for a specified class
600 static ALWAYS_INLINE
size_t ByteSizeForClass(size_t cl
) {
601 return class_to_size
[cl
];
603 static int NumMoveSize(size_t size
) {
604 if (size
== 0) return 0;
605 // Use approx 64k transfers between thread and central caches.
606 int num
= static_cast<int>(64.0 * 1024.0 / size
);
607 if (num
< 2) num
= 2;
608 // Clamp well below kMaxFreeListLength to avoid ping pong between central
609 // and thread caches.
610 if (num
> static_cast<int>(0.8 * kMaxFreeListLength
))
611 num
= static_cast<int>(0.8 * kMaxFreeListLength
);
613 // Also, avoid bringing in too many objects into small object free
614 // lists. There are lots of such lists, and if we allow each one to
615 // fetch too many at a time, we end up having to scavenge too often
616 // (especially when there are lots of threads and each thread gets a
617 // small allowance for its thread cache).
619 // TODO: Make thread cache free list sizes dynamic so that we do not
620 // have to equally divide a fixed resource amongst lots of threads.
621 if (num
> 32) num
= 32;
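  // Worked example (added for clarity, not part of the original source):
  // for a 4096-byte class, num = 64K / 4096 = 16 objects per transfer; for a
  // 64-byte class, 64K / 64 = 1024 is first clamped to 0.8 * 256 = 204 and
  // then to the final cap of 32.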
626 // Initialize the mapping arrays
627 static void InitSizeClasses() {
628 // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
629 if (ClassIndex(0) < 0) {
630 MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
633 if (static_cast<size_t>(ClassIndex(kMaxSize
)) >= sizeof(class_array
)) {
634 MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize
));
638 // Compute the size classes we want to use
639 size_t sc
= 1; // Next size class to assign
640 unsigned char alignshift
= kAlignShift
;
642 for (size_t size
= kAlignment
; size
<= kMaxSize
; size
+= (1 << alignshift
)) {
643 int lg
= LgFloor(size
);
645 // Increase alignment every so often.
647 // Since we double the alignment every time size doubles and
648 // size >= 128, this means that space wasted due to alignment is
649 // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
650 // bytes, so the space wasted as a percentage starts falling for
652 if ((lg
>= 7) && (alignshift
< 8)) {
658 // Allocate enough pages so leftover is less than 1/8 of total.
659 // This bounds wasted space to at most 12.5%.
660 size_t psize
= kPageSize
;
661 while ((psize
% size
) > (psize
>> 3)) {
664 const size_t my_pages
= psize
>> kPageShift
;
666 if (sc
> 1 && my_pages
== class_to_pages
[sc
-1]) {
667 // See if we can merge this into the previous class without
668 // increasing the fragmentation of the previous class.
669 const size_t my_objects
= (my_pages
<< kPageShift
) / size
;
670 const size_t prev_objects
= (class_to_pages
[sc
-1] << kPageShift
)
671 / class_to_size
[sc
-1];
672 if (my_objects
== prev_objects
) {
673 // Adjust last class to include this size
674 class_to_size
[sc
-1] = size
;
680 class_to_pages
[sc
] = my_pages
;
681 class_to_size
[sc
] = size
;
684 if (sc
!= kNumClasses
) {
685 MESSAGE("wrong number of size classes: found %" PRIuS
" instead of %d\n",
686 sc
, int(kNumClasses
));
690 // Initialize the mapping arrays
692 for (unsigned char c
= 1; c
< kNumClasses
; c
++) {
693 const size_t max_size_in_class
= class_to_size
[c
];
694 for (size_t s
= next_size
; s
<= max_size_in_class
; s
+= kAlignment
) {
695 class_array
[ClassIndex(s
)] = c
;
697 next_size
= static_cast<int>(max_size_in_class
+ kAlignment
);
700 // Double-check sizes just to be safe
701 for (size_t size
= 0; size
<= kMaxSize
; size
++) {
702 const size_t sc
= SizeClass(size
);
704 MESSAGE("Bad size class %" PRIuS
" for %" PRIuS
"\n", sc
, size
);
707 if (sc
> 1 && size
<= class_to_size
[sc
-1]) {
708 MESSAGE("Allocating unnecessarily large class %" PRIuS
" for %" PRIuS
712 if (sc
>= kNumClasses
) {
713 MESSAGE("Bad size class %" PRIuS
" for %" PRIuS
"\n", sc
, size
);
716 const size_t s
= class_to_size
[sc
];
718 MESSAGE("Bad size %" PRIuS
" for %" PRIuS
" (sc = %" PRIuS
")\n", s
, size
, sc
);
722 MESSAGE("Bad size %" PRIuS
" for %" PRIuS
" (sc = %" PRIuS
")\n", s
, size
, sc
);
727 // Initialize the num_objects_to_move array.
728 for (size_t cl
= 1; cl
< kNumClasses
; ++cl
) {
729 num_objects_to_move
[cl
] = NumMoveSize(ByteSizeForClass(cl
));
734 // Dump class sizes and maximum external wastage per size class
735 for (size_t cl
= 1; cl
< kNumClasses
; ++cl
) {
736 const int alloc_size
= class_to_pages
[cl
] << kPageShift
;
737 const int alloc_objs
= alloc_size
/ class_to_size
[cl
];
738 const int min_used
= (class_to_size
[cl
-1] + 1) * alloc_objs
;
739 const int max_waste
= alloc_size
- min_used
;
740 MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
742 int(class_to_size
[cl
-1] + 1),
743 int(class_to_size
[cl
]),
744 int(class_to_pages
[cl
] << kPageShift
),
745 max_waste
* 100.0 / alloc_size
752 // -------------------------------------------------------------------------
753 // Simple allocator for objects of a specified type. External locking
754 // is required before accessing one of these objects.
755 // -------------------------------------------------------------------------
757 // Metadata allocator -- keeps stats about how many bytes allocated
758 static uint64_t metadata_system_bytes
= 0;
759 static void* MetaDataAlloc(size_t bytes
) {
760 void* result
= TCMalloc_SystemAlloc(bytes
, 0);
761 if (result
!= NULL
) {
762 metadata_system_bytes
+= bytes
;
768 class PageHeapAllocator
{
770 // How much to allocate from system at a time
771 static const size_t kAllocIncrement
= 32 << 10;
774 static const size_t kAlignedSize
775 = (((sizeof(T
) + kAlignment
- 1) / kAlignment
) * kAlignment
);
777 // Free area from which to carve new objects
781 // Free list of already carved objects
784 // Number of allocated but unfreed objects
789 ASSERT(kAlignedSize
<= kAllocIncrement
);
799 if (free_list_
!= NULL
) {
801 free_list_
= *(reinterpret_cast<void**>(result
));
803 if (free_avail_
< kAlignedSize
) {
805 free_area_
= reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement
));
806 if (free_area_
== NULL
) abort();
807 free_avail_
= kAllocIncrement
;
810 free_area_
+= kAlignedSize
;
811 free_avail_
-= kAlignedSize
;
814 return reinterpret_cast<T
*>(result
);
818 *(reinterpret_cast<void**>(p
)) = free_list_
;
823 int inuse() const { return inuse_
; }
826 // -------------------------------------------------------------------------
827 // Span - a contiguous run of pages
828 // -------------------------------------------------------------------------
830 // Type that can hold a page number
831 typedef uintptr_t PageID
;
833 // Type that can hold the length of a run of pages
834 typedef uintptr_t Length
;
836 static const Length kMaxValidPages
= (~static_cast<Length
>(0)) >> kPageShift
;
// Convert byte size into pages.  This won't overflow, but may return
// an unreasonably large value if bytes is huge enough.
static inline Length pages(size_t bytes) {
  return (bytes >> kPageShift) +
         ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}
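// Worked example (added for clarity, not in the original source): with 4K
// pages, pages(10000) = (10000 >> 12) + 1 = 2 + 1 = 3, since 10000 bytes do
// not fit in two 4096-byte pages; pages(8192) = 2 exactly.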
// Convert a user size into the number of bytes that will actually be
// allocated
static size_t AllocationSize(size_t bytes) {
  if (bytes > kMaxSize) {
    // Large object: we allocate an integral number of pages
    ASSERT(bytes <= (kMaxValidPages << kPageShift));
    return pages(bytes) << kPageShift;
  } else {
    // Small object: find the size class to which it belongs
    return ByteSizeForClass(SizeClass(bytes));
  }
}
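// Worked example (added for clarity, not in the original source): with
// kMaxSize = 32K, AllocationSize(40000) = pages(40000) << 12 = 10 * 4096
// = 40960 bytes, while a small request such as AllocationSize(100) is
// rounded up to the byte size of the size class that covers 100 bytes.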
858 // Information kept for a span (a contiguous run of pages).
860 PageID start
; // Starting page number
861 Length length
; // Number of pages in span
862 Span
* next
; // Used when in link list
863 Span
* prev
; // Used when in link list
864 void* objects
; // Linked list of free objects
865 unsigned int free
: 1; // Is the span free
866 unsigned int sample
: 1; // Sampled object?
867 unsigned int sizeclass
: 8; // Size-class for small objects (or 0)
868 unsigned int refcount
: 11; // Number of non-free objects
// For debugging, we can keep a log of events per span
880 void Event(Span
* span
, char op
, int v
= 0) {
881 span
->history
[span
->nexthistory
] = op
;
882 span
->value
[span
->nexthistory
] = v
;
884 if (span
->nexthistory
== sizeof(span
->history
)) span
->nexthistory
= 0;
887 #define Event(s,o,v) ((void) 0)
890 // Allocator/deallocator for spans
891 static PageHeapAllocator
<Span
> span_allocator
;
892 static Span
* NewSpan(PageID p
, Length len
) {
893 Span
* result
= span_allocator
.New();
894 memset(result
, 0, sizeof(*result
));
896 result
->length
= len
;
898 result
->nexthistory
= 0;
903 static inline void DeleteSpan(Span
* span
) {
905 // In debug mode, trash the contents of deleted Spans
906 memset(span
, 0x3f, sizeof(*span
));
908 span_allocator
.Delete(span
);
911 // -------------------------------------------------------------------------
912 // Doubly linked list of spans.
913 // -------------------------------------------------------------------------
915 static inline void DLL_Init(Span
* list
) {
920 static inline void DLL_Remove(Span
* span
) {
921 span
->prev
->next
= span
->next
;
922 span
->next
->prev
= span
->prev
;
927 static ALWAYS_INLINE
bool DLL_IsEmpty(const Span
* list
) {
928 return list
->next
== list
;
932 static int DLL_Length(const Span
* list
) {
934 for (Span
* s
= list
->next
; s
!= list
; s
= s
->next
) {
941 #if 0 /* Not needed at the moment -- causes compiler warnings if not used */
942 static void DLL_Print(const char* label
, const Span
* list
) {
943 MESSAGE("%-10s %p:", label
, list
);
944 for (const Span
* s
= list
->next
; s
!= list
; s
= s
->next
) {
945 MESSAGE(" <%p,%u,%u>", s
, s
->start
, s
->length
);
951 static inline void DLL_Prepend(Span
* list
, Span
* span
) {
952 ASSERT(span
->next
== NULL
);
953 ASSERT(span
->prev
== NULL
);
954 span
->next
= list
->next
;
956 list
->next
->prev
= span
;
960 // -------------------------------------------------------------------------
961 // Stack traces kept for sampled allocations
962 // The following state is protected by pageheap_lock_.
963 // -------------------------------------------------------------------------
965 // size/depth are made the same size as a pointer so that some generic
966 // code below can conveniently cast them back and forth to void*.
967 static const int kMaxStackDepth
= 31;
969 uintptr_t size
; // Size of object
970 uintptr_t depth
; // Number of PC values stored in array below
971 void* stack
[kMaxStackDepth
];
973 static PageHeapAllocator
<StackTrace
> stacktrace_allocator
;
974 static Span sampled_objects
;
976 // -------------------------------------------------------------------------
977 // Map from page-id to per-page data
978 // -------------------------------------------------------------------------
980 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
981 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
982 // because sometimes the sizeclass is all the information we need.
984 // Selector class -- general selector uses 3-level map
985 template <int BITS
> class MapSelector
{
987 typedef TCMalloc_PageMap3
<BITS
-kPageShift
> Type
;
988 typedef PackedCache
<BITS
, uint64_t> CacheType
;
991 // A two-level map for 32-bit machines
992 template <> class MapSelector
<32> {
994 typedef TCMalloc_PageMap2
<32-kPageShift
> Type
;
995 typedef PackedCache
<32-kPageShift
, uint16_t> CacheType
;
998 // -------------------------------------------------------------------------
999 // Page-level allocator
1000 // * Eager coalescing
// Heap for page-level allocation.  We allow allocating and freeing a
// contiguous run of pages (called a "span").
1004 // -------------------------------------------------------------------------
1006 class TCMalloc_PageHeap
{
1010 // Allocate a run of "n" pages. Returns zero if out of memory.
1011 Span
* New(Length n
);
1013 // Delete the span "[p, p+n-1]".
1014 // REQUIRES: span was returned by earlier call to New() and
1015 // has not yet been deleted.
1016 void Delete(Span
* span
);
1018 // Mark an allocated span as being used for small objects of the
1019 // specified size-class.
1020 // REQUIRES: span was returned by an earlier call to New()
1021 // and has not yet been deleted.
1022 void RegisterSizeClass(Span
* span
, size_t sc
);
1024 // Split an allocated span into two spans: one of length "n" pages
1025 // followed by another span of length "span->length - n" pages.
1026 // Modifies "*span" to point to the first span of length "n" pages.
1027 // Returns a pointer to the second span.
1029 // REQUIRES: "0 < n < span->length"
1030 // REQUIRES: !span->free
1031 // REQUIRES: span->sizeclass == 0
1032 Span
* Split(Span
* span
, Length n
);
1034 // Return the descriptor for the specified page.
1035 inline Span
* GetDescriptor(PageID p
) const {
1036 return reinterpret_cast<Span
*>(pagemap_
.get(p
));
1040 inline Span
* GetDescriptorEnsureSafe(PageID p
)
1042 pagemap_
.Ensure(p
, 1);
1043 return GetDescriptor(p
);
1047 // Dump state to stderr
1049 void Dump(TCMalloc_Printer
* out
);
1052 // Return number of bytes allocated from system
1053 inline uint64_t SystemBytes() const { return system_bytes_
; }
1055 // Return number of free bytes in heap
1056 uint64_t FreeBytes() const {
1057 return (static_cast<uint64_t>(free_pages_
) << kPageShift
);
1061 bool CheckList(Span
* list
, Length min_pages
, Length max_pages
);
1063 // Release all pages on the free list for reuse by the OS:
1064 void ReleaseFreePages();
1066 // Return 0 if we have no information, or else the correct sizeclass for p.
1067 // Reads and writes to pagemap_cache_ do not require locking.
1068 // The entries are 64 bits on 64-bit hardware and 16 bits on
1069 // 32-bit hardware, and we don't mind raciness as long as each read of
1070 // an entry yields a valid entry, not a partially updated entry.
1071 size_t GetSizeClassIfCached(PageID p
) const {
1072 return pagemap_cache_
.GetOrDefault(p
, 0);
1074 void CacheSizeClass(PageID p
, size_t cl
) const { pagemap_cache_
.Put(p
, cl
); }
1077 // Pick the appropriate map and cache types based on pointer size
1078 typedef MapSelector
<8*sizeof(uintptr_t)>::Type PageMap
;
1079 typedef MapSelector
<8*sizeof(uintptr_t)>::CacheType PageMapCache
;
1081 mutable PageMapCache pagemap_cache_
;
1083 // We segregate spans of a given size into two circular linked
1084 // lists: one for normal spans, and one for spans whose memory
1085 // has been returned to the system.
1091 // List of free spans of length >= kMaxPages
1094 // Array mapping from span length to a doubly linked list of free spans
1095 SpanList free_
[kMaxPages
];
1097 // Number of pages kept in free lists
1098 uintptr_t free_pages_
;
1100 // Bytes allocated from system
1101 uint64_t system_bytes_
;
1103 bool GrowHeap(Length n
);
1105 // REQUIRES span->length >= n
1106 // Remove span from its free list, and move any leftover part of
1107 // span into appropriate free lists. Also update "span" to have
1108 // length exactly "n" and mark it as non-free so it can be returned
1111 // "released" is true iff "span" was found on a "returned" list.
1112 void Carve(Span
* span
, Length n
, bool released
);
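  // Illustrative example (added for clarity, not part of the original
  // comment): Carve(span, 3, false) on a free 10-page span removes it from
  // its free list, trims it to exactly 3 pages marked non-free, and prepends
  // the 7-page leftover to free_[7].normal (or to the "returned" list when
  // released is true).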
1114 void RecordSpan(Span
* span
) {
1115 pagemap_
.set(span
->start
, span
);
1116 if (span
->length
> 1) {
1117 pagemap_
.set(span
->start
+ span
->length
- 1, span
);
1121 // Allocate a large span of length == n. If successful, returns a
1122 // span of exactly the specified length. Else, returns NULL.
1123 Span
* AllocLarge(Length n
);
1125 // Incrementally release some memory to the system.
1126 // IncrementalScavenge(n) is called whenever n pages are freed.
1127 void IncrementalScavenge(Length n
);
1129 // Number of pages to deallocate before doing more scavenging
1130 int64_t scavenge_counter_
;
1132 // Index of last free list we scavenged
1133 size_t scavenge_index_
;
1135 #if defined(WTF_CHANGES) && PLATFORM(DARWIN)
1136 friend class FastMallocZone
;
1140 void TCMalloc_PageHeap::init()
1142 pagemap_
.init(MetaDataAlloc
);
1143 pagemap_cache_
= PageMapCache(0);
1146 scavenge_counter_
= 0;
1147 // Start scavenging at kMaxPages list
1148 scavenge_index_
= kMaxPages
-1;
1149 COMPILE_ASSERT(kNumClasses
<= (1 << PageMapCache::kValuebits
), valuebits
);
1150 DLL_Init(&large_
.normal
);
1151 DLL_Init(&large_
.returned
);
1152 for (size_t i
= 0; i
< kMaxPages
; i
++) {
1153 DLL_Init(&free_
[i
].normal
);
1154 DLL_Init(&free_
[i
].returned
);
1158 inline Span
* TCMalloc_PageHeap::New(Length n
) {
1162 // Find first size >= n that has a non-empty list
1163 for (Length s
= n
; s
< kMaxPages
; s
++) {
1165 bool released
= false;
1166 if (!DLL_IsEmpty(&free_
[s
].normal
)) {
1167 // Found normal span
1168 ll
= &free_
[s
].normal
;
1169 } else if (!DLL_IsEmpty(&free_
[s
].returned
)) {
1170 // Found returned span; reallocate it
1171 ll
= &free_
[s
].returned
;
1174 // Keep looking in larger classes
1178 Span
* result
= ll
->next
;
1179 Carve(result
, n
, released
);
1185 Span
* result
= AllocLarge(n
);
1186 if (result
!= NULL
) return result
;
1188 // Grow the heap and try again
1194 return AllocLarge(n
);
1197 Span
* TCMalloc_PageHeap::AllocLarge(Length n
) {
  // Find the best span (closest to n in size).
  // The following loops implement address-ordered best-fit.
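  // Illustrative example (added for clarity, not in the original source):
  // for n = 8 with large spans of length 12, 9 and 9 available, best-fit
  // picks a length-9 span, and the address tie-break below prefers the one
  // with the smaller starting page number.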
1200 bool from_released
= false;
1203 // Search through normal list
1204 for (Span
* span
= large_
.normal
.next
;
1205 span
!= &large_
.normal
;
1206 span
= span
->next
) {
1207 if (span
->length
>= n
) {
1209 || (span
->length
< best
->length
)
1210 || ((span
->length
== best
->length
) && (span
->start
< best
->start
))) {
1212 from_released
= false;
1217 // Search through released list in case it has a better fit
1218 for (Span
* span
= large_
.returned
.next
;
1219 span
!= &large_
.returned
;
1220 span
= span
->next
) {
1221 if (span
->length
>= n
) {
1223 || (span
->length
< best
->length
)
1224 || ((span
->length
== best
->length
) && (span
->start
< best
->start
))) {
1226 from_released
= true;
1232 Carve(best
, n
, from_released
);
1240 Span
* TCMalloc_PageHeap::Split(Span
* span
, Length n
) {
1242 ASSERT(n
< span
->length
);
1243 ASSERT(!span
->free
);
1244 ASSERT(span
->sizeclass
== 0);
1245 Event(span
, 'T', n
);
1247 const Length extra
= span
->length
- n
;
1248 Span
* leftover
= NewSpan(span
->start
+ n
, extra
);
1249 Event(leftover
, 'U', extra
);
1250 RecordSpan(leftover
);
1251 pagemap_
.set(span
->start
+ n
- 1, span
); // Update map from pageid to span
1257 inline void TCMalloc_PageHeap::Carve(Span
* span
, Length n
, bool released
) {
1261 Event(span
, 'A', n
);
1263 const int extra
= static_cast<int>(span
->length
- n
);
1266 Span
* leftover
= NewSpan(span
->start
+ n
, extra
);
1268 Event(leftover
, 'S', extra
);
1269 RecordSpan(leftover
);
1271 // Place leftover span on appropriate free list
1272 SpanList
* listpair
= (static_cast<size_t>(extra
) < kMaxPages
) ? &free_
[extra
] : &large_
;
1273 Span
* dst
= released
? &listpair
->returned
: &listpair
->normal
;
1274 DLL_Prepend(dst
, leftover
);
1277 pagemap_
.set(span
->start
+ n
- 1, span
);
1281 inline void TCMalloc_PageHeap::Delete(Span
* span
) {
1283 ASSERT(!span
->free
);
1284 ASSERT(span
->length
> 0);
1285 ASSERT(GetDescriptor(span
->start
) == span
);
1286 ASSERT(GetDescriptor(span
->start
+ span
->length
- 1) == span
);
1287 span
->sizeclass
= 0;
1290 // Coalesce -- we guarantee that "p" != 0, so no bounds checking
1291 // necessary. We do not bother resetting the stale pagemap
1292 // entries for the pieces we are merging together because we only
1293 // care about the pagemap entries for the boundaries.
1295 // Note that the spans we merge into "span" may come out of
1296 // a "returned" list. For simplicity, we move these into the
1297 // "normal" list of the appropriate size class.
1298 const PageID p
= span
->start
;
1299 const Length n
= span
->length
;
1300 Span
* prev
= GetDescriptor(p
-1);
1301 if (prev
!= NULL
&& prev
->free
) {
1302 // Merge preceding span into this span
1303 ASSERT(prev
->start
+ prev
->length
== p
);
1304 const Length len
= prev
->length
;
1308 span
->length
+= len
;
1309 pagemap_
.set(span
->start
, span
);
1310 Event(span
, 'L', len
);
1312 Span
* next
= GetDescriptor(p
+n
);
1313 if (next
!= NULL
&& next
->free
) {
1314 // Merge next span into this span
1315 ASSERT(next
->start
== p
+n
);
1316 const Length len
= next
->length
;
1319 span
->length
+= len
;
1320 pagemap_
.set(span
->start
+ span
->length
- 1, span
);
1321 Event(span
, 'R', len
);
1324 Event(span
, 'D', span
->length
);
1326 if (span
->length
< kMaxPages
) {
1327 DLL_Prepend(&free_
[span
->length
].normal
, span
);
1329 DLL_Prepend(&large_
.normal
, span
);
1333 IncrementalScavenge(n
);
1337 void TCMalloc_PageHeap::IncrementalScavenge(Length n
) {
1338 // Fast path; not yet time to release memory
1339 scavenge_counter_
-= n
;
1340 if (scavenge_counter_
>= 0) return; // Not yet time to scavenge
1342 // If there is nothing to release, wait for so many pages before
1343 // scavenging again. With 4K pages, this comes to 16MB of memory.
1344 static const size_t kDefaultReleaseDelay
= 1 << 8;
1346 // Find index of free list to scavenge
1347 size_t index
= scavenge_index_
+ 1;
1348 for (size_t i
= 0; i
< kMaxPages
+1; i
++) {
1349 if (index
> kMaxPages
) index
= 0;
1350 SpanList
* slist
= (index
== kMaxPages
) ? &large_
: &free_
[index
];
1351 if (!DLL_IsEmpty(&slist
->normal
)) {
1352 // Release the last span on the normal portion of this list
1353 Span
* s
= slist
->normal
.prev
;
1355 TCMalloc_SystemRelease(reinterpret_cast<void*>(s
->start
<< kPageShift
),
1356 static_cast<size_t>(s
->length
<< kPageShift
));
1357 DLL_Prepend(&slist
->returned
, s
);
1359 scavenge_counter_
= std::max
<size_t>(64UL, std::min
<size_t>(kDefaultReleaseDelay
, kDefaultReleaseDelay
- (free_pages_
/ kDefaultReleaseDelay
)));
1361 if (index
== kMaxPages
&& !DLL_IsEmpty(&slist
->normal
))
1362 scavenge_index_
= index
- 1;
1364 scavenge_index_
= index
;
1370 // Nothing to scavenge, delay for a while
1371 scavenge_counter_
= kDefaultReleaseDelay
;
1374 void TCMalloc_PageHeap::RegisterSizeClass(Span
* span
, size_t sc
) {
1375 // Associate span object with all interior pages as well
1376 ASSERT(!span
->free
);
1377 ASSERT(GetDescriptor(span
->start
) == span
);
1378 ASSERT(GetDescriptor(span
->start
+span
->length
-1) == span
);
1379 Event(span
, 'C', sc
);
1380 span
->sizeclass
= static_cast<unsigned int>(sc
);
1381 for (Length i
= 1; i
< span
->length
-1; i
++) {
1382 pagemap_
.set(span
->start
+i
, span
);
1387 static double PagesToMB(uint64_t pages
) {
1388 return (pages
<< kPageShift
) / 1048576.0;
1391 void TCMalloc_PageHeap::Dump(TCMalloc_Printer
* out
) {
1392 int nonempty_sizes
= 0;
1393 for (int s
= 0; s
< kMaxPages
; s
++) {
1394 if (!DLL_IsEmpty(&free_
[s
].normal
) || !DLL_IsEmpty(&free_
[s
].returned
)) {
1398 out
->printf("------------------------------------------------\n");
1399 out
->printf("PageHeap: %d sizes; %6.1f MB free\n",
1400 nonempty_sizes
, PagesToMB(free_pages_
));
1401 out
->printf("------------------------------------------------\n");
1402 uint64_t total_normal
= 0;
1403 uint64_t total_returned
= 0;
1404 for (int s
= 0; s
< kMaxPages
; s
++) {
1405 const int n_length
= DLL_Length(&free_
[s
].normal
);
1406 const int r_length
= DLL_Length(&free_
[s
].returned
);
1407 if (n_length
+ r_length
> 0) {
1408 uint64_t n_pages
= s
* n_length
;
1409 uint64_t r_pages
= s
* r_length
;
1410 total_normal
+= n_pages
;
1411 total_returned
+= r_pages
;
1412 out
->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
1413 "; unmapped: %6.1f MB; %6.1f MB cum\n",
1415 (n_length
+ r_length
),
1416 PagesToMB(n_pages
+ r_pages
),
1417 PagesToMB(total_normal
+ total_returned
),
1419 PagesToMB(total_returned
));
1423 uint64_t n_pages
= 0;
1424 uint64_t r_pages
= 0;
1427 out
->printf("Normal large spans:\n");
1428 for (Span
* s
= large_
.normal
.next
; s
!= &large_
.normal
; s
= s
->next
) {
1429 out
->printf(" [ %6" PRIuS
" pages ] %6.1f MB\n",
1430 s
->length
, PagesToMB(s
->length
));
1431 n_pages
+= s
->length
;
1434 out
->printf("Unmapped large spans:\n");
1435 for (Span
* s
= large_
.returned
.next
; s
!= &large_
.returned
; s
= s
->next
) {
1436 out
->printf(" [ %6" PRIuS
" pages ] %6.1f MB\n",
1437 s
->length
, PagesToMB(s
->length
));
1438 r_pages
+= s
->length
;
1441 total_normal
+= n_pages
;
1442 total_returned
+= r_pages
;
1443 out
->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
1444 "; unmapped: %6.1f MB; %6.1f MB cum\n",
1445 (n_spans
+ r_spans
),
1446 PagesToMB(n_pages
+ r_pages
),
1447 PagesToMB(total_normal
+ total_returned
),
1449 PagesToMB(total_returned
));
1453 bool TCMalloc_PageHeap::GrowHeap(Length n
) {
1454 ASSERT(kMaxPages
>= kMinSystemAlloc
);
1455 if (n
> kMaxValidPages
) return false;
1456 Length ask
= (n
>kMinSystemAlloc
) ? n
: static_cast<Length
>(kMinSystemAlloc
);
1458 void* ptr
= TCMalloc_SystemAlloc(ask
<< kPageShift
, &actual_size
, kPageSize
);
1461 // Try growing just "n" pages
1463 ptr
= TCMalloc_SystemAlloc(ask
<< kPageShift
, &actual_size
, kPageSize
);;
1465 if (ptr
== NULL
) return false;
1467 ask
= actual_size
>> kPageShift
;
1469 uint64_t old_system_bytes
= system_bytes_
;
1470 system_bytes_
+= (ask
<< kPageShift
);
1471 const PageID p
= reinterpret_cast<uintptr_t>(ptr
) >> kPageShift
;
  // If we already have a lot of pages allocated, just preallocate a bunch of
1475 // memory for the page map. This prevents fragmentation by pagemap metadata
1476 // when a program keeps allocating and freeing large blocks.
1478 if (old_system_bytes
< kPageMapBigAllocationThreshold
1479 && system_bytes_
>= kPageMapBigAllocationThreshold
) {
1480 pagemap_
.PreallocateMoreMemory();
1483 // Make sure pagemap_ has entries for all of the new pages.
1484 // Plus ensure one before and one after so coalescing code
1485 // does not need bounds-checking.
1486 if (pagemap_
.Ensure(p
-1, ask
+2)) {
1487 // Pretend the new area is allocated and then Delete() it to
1488 // cause any necessary coalescing to occur.
1490 // We do not adjust free_pages_ here since Delete() will do it for us.
1491 Span
* span
= NewSpan(p
, ask
);
1497 // We could not allocate memory within "pagemap_"
1498 // TODO: Once we can return memory to the system, return the new span
1503 bool TCMalloc_PageHeap::Check() {
1504 ASSERT(free_
[0].normal
.next
== &free_
[0].normal
);
1505 ASSERT(free_
[0].returned
.next
== &free_
[0].returned
);
1506 CheckList(&large_
.normal
, kMaxPages
, 1000000000);
1507 CheckList(&large_
.returned
, kMaxPages
, 1000000000);
1508 for (Length s
= 1; s
< kMaxPages
; s
++) {
1509 CheckList(&free_
[s
].normal
, s
, s
);
1510 CheckList(&free_
[s
].returned
, s
, s
);
1516 bool TCMalloc_PageHeap::CheckList(Span
*, Length
, Length
) {
1520 bool TCMalloc_PageHeap::CheckList(Span
* list
, Length min_pages
, Length max_pages
) {
1521 for (Span
* s
= list
->next
; s
!= list
; s
= s
->next
) {
1522 CHECK_CONDITION(s
->free
);
1523 CHECK_CONDITION(s
->length
>= min_pages
);
1524 CHECK_CONDITION(s
->length
<= max_pages
);
1525 CHECK_CONDITION(GetDescriptor(s
->start
) == s
);
1526 CHECK_CONDITION(GetDescriptor(s
->start
+s
->length
-1) == s
);
1532 static void ReleaseFreeList(Span
* list
, Span
* returned
) {
1533 // Walk backwards through list so that when we push these
1534 // spans on the "returned" list, we preserve the order.
1535 while (!DLL_IsEmpty(list
)) {
1536 Span
* s
= list
->prev
;
1538 DLL_Prepend(returned
, s
);
1539 TCMalloc_SystemRelease(reinterpret_cast<void*>(s
->start
<< kPageShift
),
1540 static_cast<size_t>(s
->length
<< kPageShift
));
1544 void TCMalloc_PageHeap::ReleaseFreePages() {
1545 for (Length s
= 0; s
< kMaxPages
; s
++) {
1546 ReleaseFreeList(&free_
[s
].normal
, &free_
[s
].returned
);
1548 ReleaseFreeList(&large_
.normal
, &large_
.returned
);
1552 //-------------------------------------------------------------------
1554 //-------------------------------------------------------------------
1556 class TCMalloc_ThreadCache_FreeList
{
1558 void* list_
; // Linked list of nodes
1559 uint16_t length_
; // Current length
1560 uint16_t lowater_
; // Low water mark for list length
1569 // Return current length of list
1570 int length() const {
1575 bool empty() const {
1576 return list_
== NULL
;
1579 // Low-water mark management
1580 int lowwatermark() const { return lowater_
; }
1581 void clear_lowwatermark() { lowater_
= length_
; }
1583 ALWAYS_INLINE
void Push(void* ptr
) {
1584 SLL_Push(&list_
, ptr
);
1588 void PushRange(int N
, void *start
, void *end
) {
1589 SLL_PushRange(&list_
, start
, end
);
1590 length_
= length_
+ static_cast<uint16_t>(N
);
1593 void PopRange(int N
, void **start
, void **end
) {
1594 SLL_PopRange(&list_
, N
, start
, end
);
1595 ASSERT(length_
>= N
);
1596 length_
= length_
- static_cast<uint16_t>(N
);
1597 if (length_
< lowater_
) lowater_
= length_
;
1600 ALWAYS_INLINE
void* Pop() {
1601 ASSERT(list_
!= NULL
);
1603 if (length_
< lowater_
) lowater_
= length_
;
1604 return SLL_Pop(&list_
);
1608 template <class Finder
, class Reader
>
1609 void enumerateFreeObjects(Finder
& finder
, const Reader
& reader
)
1611 for (void* nextObject
= list_
; nextObject
; nextObject
= *reader(reinterpret_cast<void**>(nextObject
)))
1612 finder
.visit(nextObject
);
1617 //-------------------------------------------------------------------
1618 // Data kept per thread
1619 //-------------------------------------------------------------------
1621 class TCMalloc_ThreadCache
{
1623 typedef TCMalloc_ThreadCache_FreeList FreeList
;
1625 typedef DWORD ThreadIdentifier
;
1627 typedef pthread_t ThreadIdentifier
;
1630 size_t size_
; // Combined size of data
1631 ThreadIdentifier tid_
; // Which thread owns it
1632 bool in_setspecific_
; // Called pthread_setspecific?
1633 FreeList list_
[kNumClasses
]; // Array indexed by size-class
1635 // We sample allocations, biased by the size of the allocation
1636 uint32_t rnd_
; // Cheap random number generator
1637 size_t bytes_until_sample_
; // Bytes until we sample next
1639 // Allocate a new heap. REQUIRES: pageheap_lock is held.
1640 static inline TCMalloc_ThreadCache
* NewHeap(ThreadIdentifier tid
);
1642 // Use only as pthread thread-specific destructor function.
1643 static void DestroyThreadCache(void* ptr
);
1645 // All ThreadCache objects are kept in a linked list (for stats collection)
1646 TCMalloc_ThreadCache
* next_
;
1647 TCMalloc_ThreadCache
* prev_
;
1649 void Init(ThreadIdentifier tid
);
1652 // Accessors (mostly just for printing stats)
1653 int freelist_length(size_t cl
) const { return list_
[cl
].length(); }
1655 // Total byte size in cache
1656 size_t Size() const { return size_
; }
1658 void* Allocate(size_t size
);
1659 void Deallocate(void* ptr
, size_t size_class
);
1661 void FetchFromCentralCache(size_t cl
, size_t allocationSize
);
1662 void ReleaseToCentralCache(size_t cl
, int N
);
1666 // Record allocation of "k" bytes. Return true iff allocation
1667 // should be sampled
1668 bool SampleAllocation(size_t k
);
1670 // Pick next sampling point
1671 void PickNextSample(size_t k
);
1673 static void InitModule();
1674 static void InitTSD();
1675 static TCMalloc_ThreadCache
* GetThreadHeap();
1676 static TCMalloc_ThreadCache
* GetCache();
1677 static TCMalloc_ThreadCache
* GetCacheIfPresent();
1678 static TCMalloc_ThreadCache
* CreateCacheIfNecessary();
1679 static void DeleteCache(TCMalloc_ThreadCache
* heap
);
1680 static void BecomeIdle();
1681 static void RecomputeThreadCacheSize();
1684 template <class Finder
, class Reader
>
1685 void enumerateFreeObjects(Finder
& finder
, const Reader
& reader
)
1687 for (unsigned sizeClass
= 0; sizeClass
< kNumClasses
; sizeClass
++)
1688 list_
[sizeClass
].enumerateFreeObjects(finder
, reader
);
1693 //-------------------------------------------------------------------
1694 // Data kept per size-class in central cache
1695 //-------------------------------------------------------------------
1697 class TCMalloc_Central_FreeList
{
1699 void Init(size_t cl
);
1701 // These methods all do internal locking.
1703 // Insert the specified range into the central freelist. N is the number of
1704 // elements in the range.
1705 void InsertRange(void *start
, void *end
, int N
);
  // Returns the actual number of fetched elements by storing it into *N.
1708 void RemoveRange(void **start
, void **end
, int *N
);
1710 // Returns the number of free objects in cache.
1712 SpinLockHolder
h(&lock_
);
1716 // Returns the number of free objects in the transfer cache.
1718 SpinLockHolder
h(&lock_
);
1719 return used_slots_
* num_objects_to_move
[size_class_
];
1723 template <class Finder
, class Reader
>
1724 void enumerateFreeObjects(Finder
& finder
, const Reader
& reader
)
1726 for (Span
* span
= &empty_
; span
&& span
!= &empty_
; span
= (span
->next
? reader(span
->next
) : 0))
1727 ASSERT(!span
->objects
);
1729 ASSERT(!nonempty_
.objects
);
1730 for (Span
* span
= reader(nonempty_
.next
); span
&& span
!= &nonempty_
; span
= (span
->next
? reader(span
->next
) : 0)) {
1731 for (void* nextObject
= span
->objects
; nextObject
; nextObject
= *reader(reinterpret_cast<void**>(nextObject
)))
1732 finder
.visit(nextObject
);
1738 // REQUIRES: lock_ is held
1739 // Remove object from cache and return.
1740 // Return NULL if no free entries in cache.
1741 void* FetchFromSpans();
1743 // REQUIRES: lock_ is held
1744 // Remove object from cache and return. Fetches
1745 // from pageheap if cache is empty. Only returns
1746 // NULL on allocation failure.
1747 void* FetchFromSpansSafe();
1749 // REQUIRES: lock_ is held
1750 // Release a linked list of objects to spans.
1751 // May temporarily release lock_.
1752 void ReleaseListToSpans(void *start
);
1754 // REQUIRES: lock_ is held
1755 // Release an object to spans.
1756 // May temporarily release lock_.
1757 void ReleaseToSpans(void* object
);
1759 // REQUIRES: lock_ is held
1760 // Populate cache by fetching from the page heap.
1761 // May temporarily release lock_.
1764 // REQUIRES: lock is held.
1765 // Tries to make room for a TCEntry. If the cache is full it will try to
  // expand it at the cost of some other cache size.  Return false if there is
  // no space.
  bool MakeCacheSpace();
1770 // REQUIRES: lock_ for locked_size_class is held.
1771 // Picks a "random" size class to steal TCEntry slot from. In reality it
1772 // just iterates over the sizeclasses but does so without taking a lock.
1773 // Returns true on success.
1774 // May temporarily lock a "random" size class.
1775 static bool EvictRandomSizeClass(size_t locked_size_class
, bool force
);
1777 // REQUIRES: lock_ is *not* held.
  // Tries to shrink the Cache.  If force is true it will release objects to
  // spans if it allows it to shrink the cache.  Return false if it failed to
  // shrink the cache.  Decrements cache_size_ on success.
  // May temporarily take lock_.  If it takes lock_, the locked_size_class
  // lock is released to keep the thread from holding two size class locks
  // concurrently, which could lead to a deadlock.
1784 bool ShrinkCache(int locked_size_class
, bool force
);
1786 // This lock protects all the data members. cached_entries and cache_size_
1787 // may be looked at without holding the lock.
1790 // We keep linked lists of empty and non-empty spans.
1791 size_t size_class_
; // My size class
1792 Span empty_
; // Dummy header for list of empty spans
1793 Span nonempty_
; // Dummy header for list of non-empty spans
1794 size_t counter_
; // Number of free objects in cache entry
1796 // Here we reserve space for TCEntry cache slots. Since one size class can
  // end up getting all the TCEntries quota in the system, we just preallocate
  // a sufficient number of entries here.
1799 TCEntry tc_slots_
[kNumTransferEntries
];
1801 // Number of currently used cached entries in tc_slots_. This variable is
1802 // updated under a lock but can be read without one.
1803 int32_t used_slots_
;
1804 // The current number of slots for this size class. This is an
1805 // adaptive value that is increased if there is lots of traffic
1806 // on a given size class.
1807 int32_t cache_size_
;
1810 // Pad each CentralCache object to multiple of 64 bytes
1811 class TCMalloc_Central_FreeListPadded
: public TCMalloc_Central_FreeList
{
1813 char pad_
[(64 - (sizeof(TCMalloc_Central_FreeList
) % 64)) % 64];
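// (Illustrative arithmetic, not in the original source: if
// sizeof(TCMalloc_Central_FreeList) were, say, 200 bytes, pad_ would be
// (64 - (200 % 64)) % 64 = (64 - 8) % 64 = 56 bytes, bringing the padded
// object to 256 bytes; if the size is already a multiple of 64 the pad is 0.)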
1816 //-------------------------------------------------------------------
1818 //-------------------------------------------------------------------
1820 // Central cache -- a collection of free-lists, one per size-class.
1821 // We have a separate lock per free-list to reduce contention.
1822 static TCMalloc_Central_FreeListPadded central_cache
[kNumClasses
];
1824 // Page-level allocator
1825 static SpinLock pageheap_lock
= SPINLOCK_INITIALIZER
;
1828 static void* pageheap_memory
[(sizeof(TCMalloc_PageHeap
) + sizeof(void*) - 1) / sizeof(void*)] __attribute__((aligned
));
1830 static void* pageheap_memory
[(sizeof(TCMalloc_PageHeap
) + sizeof(void*) - 1) / sizeof(void*)];
1832 static bool phinited
= false;
1834 // Avoid extra level of indirection by making "pageheap" be just an alias
1835 // of pageheap_memory.
1838 TCMalloc_PageHeap
* m_pageHeap
;
1841 static inline TCMalloc_PageHeap
* getPageHeap()
1843 PageHeapUnion u
= { &pageheap_memory
[0] };
1844 return u
.m_pageHeap
;
1847 #define pageheap getPageHeap()
1849 // If TLS is available, we also store a copy
1850 // of the per-thread object in a __thread variable
1851 // since __thread variables are faster to read
1852 // than pthread_getspecific(). We still need
1853 // pthread_setspecific() because __thread
1854 // variables provide no way to run cleanup
1855 // code when a thread is destroyed.
1857 static __thread TCMalloc_ThreadCache
*threadlocal_heap
;
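// (Illustrative note, not part of the original code: on the fast path the
// per-thread heap is read straight from threadlocal_heap, e.g.
//     TCMalloc_ThreadCache* heap = threadlocal_heap;
// while pthread_setspecific() is still used so that the thread-destruction
// callback registered with heap_key can reclaim the cache when the thread
// exits.)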
1859 // Thread-specific key. Initialization here is somewhat tricky
1860 // because some Linux startup code invokes malloc() before it
1861 // is in a good enough state to handle pthread_keycreate().
1862 // Therefore, we use TSD keys only after tsd_inited is set to true.
1863 // Until then, we use a slow path to get the heap object.
1864 static bool tsd_inited
= false;
1865 static pthread_key_t heap_key
;
1867 DWORD tlsIndex
= TLS_OUT_OF_INDEXES
;
static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
{
    // still do pthread_setspecific when using MSVC fast TLS to
    // benefit from the delete callback.
    pthread_setspecific(heap_key, heap);

#if COMPILER(MSVC)
    TlsSetValue(tlsIndex, heap);
#endif
}
// Allocator for thread heaps
static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;

// Linked list of heap objects.  Protected by pageheap_lock.
static TCMalloc_ThreadCache* thread_heaps = NULL;
static int thread_heap_count = 0;

// Overall thread cache size.  Protected by pageheap_lock.
static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;

// Global per-thread cache size.  Writes are protected by
// pageheap_lock.  Reads are done without any locking, which should be
// fine as long as size_t can be written atomically and we don't place
// invariants between this variable and other pieces of state.
static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
//-------------------------------------------------------------------
// Central cache implementation
//-------------------------------------------------------------------

void TCMalloc_Central_FreeList::Init(size_t cl) {
  lock_.Init();
  size_class_ = cl;
  DLL_Init(&empty_);
  DLL_Init(&nonempty_);
  counter_ = 0;

  cache_size_ = 1;
  used_slots_ = 0;
  ASSERT(cache_size_ <= kNumTransferEntries);
}
void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
  while (start) {
    void *next = SLL_Next(start);
    ReleaseToSpans(start);
    start = next;
  }
}
ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);
  ASSERT(span != NULL);
  ASSERT(span->refcount > 0);

  // If span is empty, move it to non-empty list
  if (span->objects == NULL) {
    DLL_Remove(span);
    DLL_Prepend(&nonempty_, span);
    Event(span, 'N', 0);
  }

  // The following check is expensive, so it is disabled by default
  if (false) {
    // Check that object does not occur in list
    int got = 0;
    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
      ASSERT(p != object);
      got++;
    }
    ASSERT(got + span->refcount ==
           (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
  }

  counter_++;
  span->refcount--;
  if (span->refcount == 0) {
    Event(span, '#', 0);
    counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
    DLL_Remove(span);

    // Release central list lock while operating on pageheap
    lock_.Unlock();
    {
      SpinLockHolder h(&pageheap_lock);
      pageheap->Delete(span);
    }
    lock_.Lock();
  } else {
    *(reinterpret_cast<void**>(object)) = span->objects;
    span->objects = object;
  }
}
ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
    size_t locked_size_class, bool force) {
  static int race_counter = 0;
  int t = race_counter++;  // Updated without a lock, but who cares.
  if (t >= static_cast<int>(kNumClasses)) {
    while (t >= static_cast<int>(kNumClasses)) {
      t -= kNumClasses;
    }
    race_counter = t;
  }
  ASSERT(t >= 0);
  ASSERT(t < static_cast<int>(kNumClasses));
  if (t == static_cast<int>(locked_size_class)) return false;
  return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
}
bool TCMalloc_Central_FreeList::MakeCacheSpace() {
  // Is there room in the cache?
  if (used_slots_ < cache_size_) return true;
  // Check if we can expand this cache?
  if (cache_size_ == kNumTransferEntries) return false;
  // Ok, we'll try to grab an entry from some other size class.
  if (EvictRandomSizeClass(size_class_, false) ||
      EvictRandomSizeClass(size_class_, true)) {
    // Succeeded in evicting, we're going to make our cache larger.
    cache_size_++;
    return true;
  }
  return false;
}
class LockInverter {
 private:
  SpinLock *held_, *temp_;
 public:
  inline explicit LockInverter(SpinLock* held, SpinLock *temp)
    : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
  inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
};
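// Usage note (see ShrinkCache below): constructing
//   LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
// first releases the lock the caller already holds and only then acquires this
// free list's lock; the destructor reverses the two steps, so a thread never
// holds two size-class locks at the same time.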
bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
  // Start with a quick check without taking a lock.
  if (cache_size_ == 0) return false;
  // We don't evict from a full cache unless we are 'forcing'.
  if (force == false && used_slots_ == cache_size_) return false;

  // Grab lock, but first release the other lock held by this thread.  We use
  // the lock inverter to ensure that we never hold two size class locks
  // concurrently.  That can create a deadlock because there is no well
  // defined nesting order.
  LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
  ASSERT(used_slots_ <= cache_size_);
  ASSERT(0 <= cache_size_);
  if (cache_size_ == 0) return false;
  if (used_slots_ == cache_size_) {
    if (force == false) return false;
    // ReleaseListToSpans releases the lock, so we have to make all the
    // updates to the central list before calling it.
    cache_size_--;
    used_slots_--;
    ReleaseListToSpans(tc_slots_[used_slots_].head);
    return true;
  }
  cache_size_--;
  return true;
}
void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
  SpinLockHolder h(&lock_);
  if (N == num_objects_to_move[size_class_] &&
      MakeCacheSpace()) {
    int slot = used_slots_++;
    ASSERT(slot >= 0);
    ASSERT(slot < kNumTransferEntries);
    TCEntry *entry = &tc_slots_[slot];
    entry->head = start;
    entry->tail = end;
    return;
  }
  ReleaseListToSpans(start);
}
void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
  int num = *N;
  ASSERT(num > 0);

  SpinLockHolder h(&lock_);
  if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
    int slot = --used_slots_;
    ASSERT(slot >= 0);
    TCEntry *entry = &tc_slots_[slot];
    *start = entry->head;
    *end = entry->tail;
    return;
  }

  // TODO: Prefetch multiple TCEntries?
  void *tail = FetchFromSpansSafe();
  if (!tail) {
    // We are completely out of memory.
    *start = *end = NULL;
    *N = 0;
    return;
  }

  SLL_SetNext(tail, NULL);
  void *head = tail;
  int count = 1;
  while (count < num) {
    void *t = FetchFromSpans();
    if (!t) break;
    SLL_Push(&head, t);
    count++;
  }
  *start = head;
  *end = tail;
  *N = count;
}

void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
  void *t = FetchFromSpans();
  if (!t) {
    Populate();
    t = FetchFromSpans();
  }
  return t;
}
void* TCMalloc_Central_FreeList::FetchFromSpans() {
  if (DLL_IsEmpty(&nonempty_)) return NULL;
  Span* span = nonempty_.next;

  ASSERT(span->objects != NULL);
  span->refcount++;
  void* result = span->objects;
  span->objects = *(reinterpret_cast<void**>(result));
  if (span->objects == NULL) {
    // Move to empty list
    DLL_Remove(span);
    DLL_Prepend(&empty_, span);
    Event(span, 'E', 0);
  }
  counter_--;
  return result;
}
// Fetch memory from the system and add to the central cache freelist.
ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
  // Release central list lock while operating on pageheap
  lock_.Unlock();
  const size_t npages = class_to_pages[size_class_];

  Span* span;
  {
    SpinLockHolder h(&pageheap_lock);
    span = pageheap->New(npages);
    if (span) pageheap->RegisterSizeClass(span, size_class_);
  }
  if (span == NULL) {
    MESSAGE("allocation failed: %d\n", errno);
    lock_.Lock();
    return;
  }
  ASSERT(span->length == npages);
  // Cache sizeclass info eagerly.  Locking is not necessary.
  // (Instead of being eager, we could just replace any stale info
  // about this span, but that seems to be no better in practice.)
  for (size_t i = 0; i < npages; i++) {
    pageheap->CacheSizeClass(span->start + i, size_class_);
  }

  // Split the block into pieces and add to the free-list
  // TODO: coloring of objects to avoid cache conflicts?
  void** tail = &span->objects;
  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
  char* limit = ptr + (npages << kPageShift);
  const size_t size = ByteSizeForClass(size_class_);
  int num = 0;
  char* nptr;
  while ((nptr = ptr + size) <= limit) {
    *tail = ptr;
    tail = reinterpret_cast<void**>(ptr);
    ptr = nptr;
    num++;
  }
  ASSERT(ptr <= limit);
  *tail = NULL;
  span->refcount = 0; // No sub-object in use yet

  // Add span to list of non-empty spans
  lock_.Lock();
  DLL_Prepend(&nonempty_, span);
  counter_ += num;
}
//-------------------------------------------------------------------
// TCMalloc_ThreadCache implementation
//-------------------------------------------------------------------

inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
  if (bytes_until_sample_ < k) {
    PickNextSample(k);
    return true;
  } else {
    bytes_until_sample_ -= k;
    return false;
  }
}

void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
  size_ = 0;
  next_ = NULL;
  prev_ = NULL;
  tid_  = tid;
  in_setspecific_ = false;
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    list_[cl].Init();
  }

  // Initialize RNG -- run it for a bit to get to good values
  bytes_until_sample_ = 0;
  rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
  for (int i = 0; i < 100; i++) {
    PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
  }
}
void TCMalloc_ThreadCache::Cleanup() {
  // Put unused memory back into central cache
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    if (list_[cl].length() > 0) {
      ReleaseToCentralCache(cl, list_[cl].length());
    }
  }
}
ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
  ASSERT(size <= kMaxSize);
  const size_t cl = SizeClass(size);
  FreeList* list = &list_[cl];
  size_t allocationSize = ByteSizeForClass(cl);
  if (list->empty()) {
    FetchFromCentralCache(cl, allocationSize);
    if (list->empty()) return NULL;
  }
  size_ -= allocationSize;
  return list->Pop();
}
inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
  size_ += ByteSizeForClass(cl);
  FreeList* list = &list_[cl];
  list->Push(ptr);
  // If enough data is free, put back into central cache
  if (list->length() > kMaxFreeListLength) {
    ReleaseToCentralCache(cl, num_objects_to_move[cl]);
  }
  if (size_ >= per_thread_cache_size) Scavenge();
}
// Remove some objects of class "cl" from central cache and add to thread heap
ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
  int fetch_count = num_objects_to_move[cl];
  void *start, *end;
  central_cache[cl].RemoveRange(&start, &end, &fetch_count);
  list_[cl].PushRange(fetch_count, start, end);
  size_ += allocationSize * fetch_count;
}
// Remove some objects of class "cl" from thread heap and add to central cache
inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
  ASSERT(N > 0);
  FreeList* src = &list_[cl];
  if (N > src->length()) N = src->length();
  size_ -= N*ByteSizeForClass(cl);

  // We return prepackaged chains of the correct size to the central cache.
  // TODO: Use the same format internally in the thread caches?
  int batch_size = num_objects_to_move[cl];
  while (N > batch_size) {
    void *tail, *head;
    src->PopRange(batch_size, &head, &tail);
    central_cache[cl].InsertRange(head, tail, batch_size);
    N -= batch_size;
  }
  void *tail, *head;
  src->PopRange(N, &head, &tail);
  central_cache[cl].InsertRange(head, tail, N);
}
// Release idle memory to the central cache
inline void TCMalloc_ThreadCache::Scavenge() {
  // If the low-water mark for the free list is L, it means we would
  // not have had to allocate anything from the central cache even if
  // we had reduced the free list size by L.  We aim to get closer to
  // that situation by dropping L/2 nodes from the free list.  This
  // may not release much memory, but if so we will call scavenge again
  // pretty soon and the low-water marks will be high on that call.
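  // (For example, a list whose low-water mark since the last scavenge was 8
  // objects gives back 4 of them on this pass; a list whose low-water mark is
  // 0 was fully drained at some point, so nothing is released from it.)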
  //int64 start = CycleClock::Now();

  for (size_t cl = 0; cl < kNumClasses; cl++) {
    FreeList* list = &list_[cl];
    const int lowmark = list->lowwatermark();
    if (lowmark > 0) {
      const int drop = (lowmark > 1) ? lowmark/2 : 1;
      ReleaseToCentralCache(cl, drop);
    }
    list->clear_lowwatermark();
  }

  //int64 finish = CycleClock::Now();
  //CycleTimer ct;
  //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
}
void TCMalloc_ThreadCache::PickNextSample(size_t k) {
  // Make next "random" number
  // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
  static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
  uint32_t r = rnd_;
  rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
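  // (This advances rnd_ as a 32-bit Galois LFSR: shift left one bit and, when
  // the bit shifted out was set, XOR in kPoly, i.e. the x^22, x^2, x^1 and x^0
  // terms of the polynomial above; the x^32 term is the shifted-out bit.)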
  // Next point is "rnd_ % (sample_period)".  I.e., average
  // increment is "sample_period/2".
  const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
  static int last_flag_value = -1;

  if (flag_value != last_flag_value) {
    SpinLockHolder h(&sample_period_lock);
    int i;
    for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
      if (primes_list[i] >= flag_value) {
        break;
      }
    }
    sample_period = primes_list[i];
    last_flag_value = flag_value;
  }

  bytes_until_sample_ += rnd_ % sample_period;

  if (k > (static_cast<size_t>(-1) >> 2)) {
    // If the user has asked for a huge allocation then it is possible
    // for the code below to loop infinitely.  Just return (note that
    // this throws off the sampling accuracy somewhat, but a user who
    // is allocating more than 1G of memory at a time can live with a
    // minor inaccuracy in profiling of small allocations, and also
    // would rather not wait for the loop below to terminate).
    return;
  }

  while (bytes_until_sample_ < k) {
    // Increase bytes_until_sample_ by enough average sampling periods
    // (sample_period >> 1) to allow us to sample past the current
    // allocation.
    bytes_until_sample_ += (sample_period >> 1);
  }

  bytes_until_sample_ -= k;
}
void TCMalloc_ThreadCache::InitModule() {
  // There is a slight potential race here because of double-checked
  // locking idiom.  However, as long as the program does a small
  // allocation before switching to multi-threaded mode, we will be
  // fine.  We increase the chances of doing such a small allocation
  // by doing one in the constructor of the module_enter_exit_hook
  // object declared below.
  SpinLockHolder h(&pageheap_lock);
  if (!phinited) {
    InitSizeClasses();
    threadheap_allocator.Init();
    span_allocator.Init();
    span_allocator.New(); // Reduce cache conflicts
    span_allocator.New(); // Reduce cache conflicts
    stacktrace_allocator.Init();
    DLL_Init(&sampled_objects);
    for (size_t i = 0; i < kNumClasses; ++i) {
      central_cache[i].Init(i);
    }
    pageheap->init();
    phinited = 1;
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
    FastMallocZone::init();
#endif
  }
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
  // Create the heap and add it to the linked list
  TCMalloc_ThreadCache *heap = threadheap_allocator.New();
  heap->Init(tid);
  heap->next_ = thread_heaps;
  heap->prev_ = NULL;
  if (thread_heaps != NULL) thread_heaps->prev_ = heap;
  thread_heaps = heap;
  thread_heap_count++;
  RecomputeThreadCacheSize();
  return heap;
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
#ifdef HAVE_TLS
  // __thread is faster, but only when the kernel supports it
  if (KernelSupportsTLS())
    return threadlocal_heap;
#elif COMPILER(MSVC)
  return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
#else
  return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
#endif
}
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
  TCMalloc_ThreadCache* ptr = NULL;
  if (!tsd_inited) {
    InitModule();
  } else {
    ptr = GetThreadHeap();
  }
  if (ptr == NULL) ptr = CreateCacheIfNecessary();
  return ptr;
}

// In deletion paths, we do not try to create a thread-cache.  This is
// because we may be in the thread destruction code and may have
// already cleaned up the cache for this thread.
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
  if (!tsd_inited) return NULL;
  void* const p = GetThreadHeap();
  return reinterpret_cast<TCMalloc_ThreadCache*>(p);
}
void TCMalloc_ThreadCache::InitTSD() {
  ASSERT(!tsd_inited);
  pthread_key_create(&heap_key, DestroyThreadCache);
#if COMPILER(MSVC)
  tlsIndex = TlsAlloc();
#endif
  tsd_inited = true;

#if !COMPILER(MSVC)
  // We may have used a fake pthread_t for the main thread.  Fix it.
  pthread_t zero;
  memset(&zero, 0, sizeof(zero));
#endif
#ifndef WTF_CHANGES
  SpinLockHolder h(&pageheap_lock);
#else
  ASSERT(pageheap_lock.IsHeld());
#endif
  for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
    if (h->tid_ == 0) {
      h->tid_ = GetCurrentThreadId();
    }
#else
    if (pthread_equal(h->tid_, zero)) {
      h->tid_ = pthread_self();
    }
#endif
  }
}
TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
  // Initialize per-thread data if necessary
  TCMalloc_ThreadCache* heap = NULL;
  {
    SpinLockHolder h(&pageheap_lock);

#if COMPILER(MSVC)
    DWORD me;
    if (!tsd_inited) {
      me = 0;
    } else {
      me = GetCurrentThreadId();
    }
#else
    // Early on in glibc's life, we cannot even call pthread_self()
    pthread_t me;
    if (!tsd_inited) {
      memset(&me, 0, sizeof(me));
    } else {
      me = pthread_self();
    }
#endif

    // This may be a recursive malloc call from pthread_setspecific()
    // In that case, the heap for this thread has already been created
    // and added to the linked list.  So we search for that first.
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
#if COMPILER(MSVC)
      if (h->tid_ == me) {
#else
      if (pthread_equal(h->tid_, me)) {
#endif
        heap = h;
        break;
      }
    }

    if (heap == NULL) heap = NewHeap(me);
  }

  // We call pthread_setspecific() outside the lock because it may
  // call malloc() recursively.  The recursive call will never get
  // here again because it will find the already allocated heap in the
  // linked list of heaps.
  if (!heap->in_setspecific_ && tsd_inited) {
    heap->in_setspecific_ = true;
    setThreadHeap(heap);
    heap->in_setspecific_ = false;
  }
  return heap;
}
void TCMalloc_ThreadCache::BecomeIdle() {
  if (!tsd_inited) return;              // No caches yet
  TCMalloc_ThreadCache* heap = GetThreadHeap();
  if (heap == NULL) return;             // No thread cache to remove
  if (heap->in_setspecific_) return;    // Do not disturb the active caller

  heap->in_setspecific_ = true;
  pthread_setspecific(heap_key, NULL);
#ifdef HAVE_TLS
  // Also update the copy in __thread
  threadlocal_heap = NULL;
#endif
  heap->in_setspecific_ = false;
  if (GetThreadHeap() == heap) {
    // Somehow heap got reinstated by a recursive call to malloc
    // from pthread_setspecific.  We give up in this case.
    return;
  }

  // We can now get rid of the heap
  DeleteCache(heap);
}
void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
  // Note that "ptr" cannot be NULL since pthread promises not
  // to invoke the destructor on NULL values, but for safety,
  // we check anyway.
  if (ptr == NULL) return;
#ifdef HAVE_TLS
  // Prevent fast path of GetThreadHeap() from returning heap.
  threadlocal_heap = NULL;
#endif
  DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
}
void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
  // Remove all memory from heap
  heap->Cleanup();

  // Remove from linked list
  SpinLockHolder h(&pageheap_lock);
  if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
  if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
  if (thread_heaps == heap) thread_heaps = heap->next_;
  thread_heap_count--;
  RecomputeThreadCacheSize();

  threadheap_allocator.Delete(heap);
}
void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
  // Divide available space across threads
  int n = thread_heap_count > 0 ? thread_heap_count : 1;
  size_t space = overall_thread_cache_size / n;

  // Limit to allowed range
  if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
  if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;

  per_thread_cache_size = space;
}
void TCMalloc_ThreadCache::Print() const {
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    MESSAGE("      %5" PRIuS " : %4d len; %4d lo\n",
            ByteSizeForClass(cl),
            list_[cl].length(),
            list_[cl].lowwatermark());
  }
}
// Extract interesting stats
struct TCMallocStats {
  uint64_t system_bytes;        // Bytes alloced from system
  uint64_t thread_bytes;        // Bytes in thread caches
  uint64_t central_bytes;       // Bytes in central cache
  uint64_t transfer_bytes;      // Bytes in central transfer cache
  uint64_t pageheap_bytes;      // Bytes in page heap
  uint64_t metadata_bytes;      // Bytes alloced for metadata
};
// Get stats into "r".  Also get per-size-class counts if class_count != NULL
static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
  r->central_bytes = 0;
  r->transfer_bytes = 0;
  for (int cl = 0; cl < kNumClasses; ++cl) {
    const int length = central_cache[cl].length();
    const int tc_length = central_cache[cl].tc_length();
    r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
    r->transfer_bytes +=
      static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
    if (class_count) class_count[cl] = length + tc_length;
  }

  // Add stats from per-thread heaps
  r->thread_bytes = 0;
  { // scope
    SpinLockHolder h(&pageheap_lock);
    for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
      r->thread_bytes += h->Size();
      if (class_count) {
        for (size_t cl = 0; cl < kNumClasses; ++cl) {
          class_count[cl] += h->freelist_length(cl);
        }
      }
    }
  }

  { // scope
    SpinLockHolder h(&pageheap_lock);
    r->system_bytes = pageheap->SystemBytes();
    r->metadata_bytes = metadata_system_bytes;
    r->pageheap_bytes = pageheap->FreeBytes();
  }
}
// WRITE stats to "out"
static void DumpStats(TCMalloc_Printer* out, int level) {
  TCMallocStats stats;
  uint64_t class_count[kNumClasses];
  ExtractStats(&stats, (level >= 2 ? class_count : NULL));

  if (level >= 2) {
    out->printf("------------------------------------------------\n");
    uint64_t cumulative = 0;
    for (int cl = 0; cl < kNumClasses; ++cl) {
      if (class_count[cl] > 0) {
        uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
        cumulative += class_bytes;
        out->printf("class %3d [ %8" PRIuS " bytes ] : "
                    "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
                    cl, ByteSizeForClass(cl),
                    class_count[cl],
                    class_bytes / 1048576.0,
                    cumulative / 1048576.0);
      }
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->Dump(out);
  }

  const uint64_t bytes_in_use = stats.system_bytes
                                - stats.pageheap_bytes
                                - stats.central_bytes
                                - stats.transfer_bytes
                                - stats.thread_bytes;

  out->printf("------------------------------------------------\n"
              "MALLOC: %12" PRIu64 " Heap size\n"
              "MALLOC: %12" PRIu64 " Bytes in use by application\n"
              "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
              "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
              "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
              "MALLOC: %12" PRIu64 " Spans in use\n"
              "MALLOC: %12" PRIu64 " Thread heaps in use\n"
              "MALLOC: %12" PRIu64 " Metadata allocated\n"
              "------------------------------------------------\n",
              stats.system_bytes,
              bytes_in_use,
              stats.pageheap_bytes,
              stats.central_bytes,
              stats.transfer_bytes,
              stats.thread_bytes,
              uint64_t(span_allocator.inuse()),
              uint64_t(threadheap_allocator.inuse()),
              stats.metadata_bytes);
}
static void PrintStats(int level) {
  const int kBufferSize = 16 << 10;
  char* buffer = new char[kBufferSize];
  TCMalloc_Printer printer(buffer, kBufferSize);
  DumpStats(&printer, level);
  write(STDERR_FILENO, buffer, strlen(buffer));
  delete[] buffer;
}
static void** DumpStackTraces() {
  // Count how much space we need
  int needed_slots = 0;
  {
    SpinLockHolder h(&pageheap_lock);
    for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
      StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
      needed_slots += 3 + stack->depth;
    }
    needed_slots += 100;            // Slop in case sample grows
    needed_slots += needed_slots/8; // An extra 12.5% slop
  }

  void** result = new void*[needed_slots];
  if (result == NULL) {
    MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
            needed_slots);
    return NULL;
  }

  SpinLockHolder h(&pageheap_lock);
  int used_slots = 0;
  for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
    ASSERT(used_slots < needed_slots);  // Need to leave room for terminator
    StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
    if (used_slots + 3 + stack->depth >= needed_slots) {
      // No more room
      break;
    }

    result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
    result[used_slots+1] = reinterpret_cast<void*>(stack->size);
    result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
    for (int d = 0; d < stack->depth; d++) {
      result[used_slots+3+d] = stack->stack[d];
    }
    used_slots += 3 + stack->depth;
  }
  result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
  return result;
}
// TCMalloc's support for extra malloc interfaces
class TCMallocImplementation : public MallocExtension {
 public:
  virtual void GetStats(char* buffer, int buffer_length) {
    ASSERT(buffer_length > 0);
    TCMalloc_Printer printer(buffer, buffer_length);

    // Print level one stats unless lots of space is available
    if (buffer_length < 10000) {
      DumpStats(&printer, 1);
    } else {
      DumpStats(&printer, 2);
    }
  }

  virtual void** ReadStackTraces() {
    return DumpStackTraces();
  }
  virtual bool GetNumericProperty(const char* name, size_t* value) {
    ASSERT(name != NULL);

    if (strcmp(name, "generic.current_allocated_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes
               - stats.thread_bytes
               - stats.central_bytes
               - stats.pageheap_bytes;
      return true;
    }

    if (strcmp(name, "generic.heap_size") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.system_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
      // We assume that bytes in the page heap are not fragmented too
      // badly, and are therefore available for allocation.
      SpinLockHolder l(&pageheap_lock);
      *value = pageheap->FreeBytes();
      return true;
    }

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(&pageheap_lock);
      *value = overall_thread_cache_size;
      return true;
    }

    if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL);
      *value = stats.thread_bytes;
      return true;
    }

    return false;
  }
  virtual bool SetNumericProperty(const char* name, size_t value) {
    ASSERT(name != NULL);

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      // Clip the value to a reasonable range
      if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
      if (value > (1<<30)) value = (1<<30);     // Limit to 1GB

      SpinLockHolder l(&pageheap_lock);
      overall_thread_cache_size = static_cast<size_t>(value);
      TCMalloc_ThreadCache::RecomputeThreadCacheSize();
      return true;
    }

    return false;
  }

  virtual void MarkThreadIdle() {
    TCMalloc_ThreadCache::BecomeIdle();
  }
  virtual void ReleaseFreeMemory() {
    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
  }
};
// The constructor allocates an object to ensure that initialization
// runs before main(), and therefore we do not have a chance to become
// multi-threaded before initialization.  We also create the TSD key
// here.  Presumably by the time this constructor runs, glibc is in
// good enough shape to handle pthread_key_create().
//
// The constructor also takes the opportunity to tell STL to use
// tcmalloc.  We want to do this early, before construct time, so
// all user STL allocations go through tcmalloc (which works really
// well for STL).
//
// The destructor prints stats when the program exits.
class TCMallocGuard {
 public:
  TCMallocGuard() {
#ifdef HAVE_TLS    // this is true if the cc/ld/libc combo support TLS
    // Check whether the kernel also supports TLS (needs to happen at runtime)
    CheckIfKernelSupportsTLS();
#endif
#ifdef WIN32                    // patch the windows VirtualAlloc, etc.
    PatchWindowsFunctions();    // defined in windows/patch_functions.cc
#endif
    free(malloc(1));
    TCMalloc_ThreadCache::InitTSD();
    free(malloc(1));
    MallocExtension::Register(new TCMallocImplementation);
  }

  ~TCMallocGuard() {
    const char* env = getenv("MALLOCSTATS");
    if (env != NULL) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);
    }
#ifdef WIN32
    UnpatchWindowsFunctions();
#endif
  }
};
static TCMallocGuard module_enter_exit_hook;
//-------------------------------------------------------------------
// Helpers for the exported routines below
//-------------------------------------------------------------------

static Span* DoSampledAllocation(size_t size) {

  // Grab the stack trace outside the heap lock
  StackTrace tmp;
  tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
  tmp.size = size;

  SpinLockHolder h(&pageheap_lock);
  // Allocate span
  Span *span = pageheap->New(pages(size == 0 ? 1 : size));
  if (span == NULL) {
    return NULL;
  }

  // Allocate stack trace
  StackTrace *stack = stacktrace_allocator.New();
  if (stack == NULL) {
    // Sampling failed because of lack of memory
    return span;
  }

  *stack = tmp;
  span->objects = stack;
  DLL_Prepend(&sampled_objects, span);

  return span;
}
static inline bool CheckCachedSizeClass(void *ptr) {
  PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cached_value = pageheap->GetSizeClassIfCached(p);
  return cached_value == 0 ||
      cached_value == pageheap->GetDescriptor(p)->sizeclass;
}

static inline void* CheckedMallocResult(void *result)
{
  ASSERT(result == 0 || CheckCachedSizeClass(result));
  return result;
}

static inline void* SpanToMallocResult(Span *span) {
  pageheap->CacheSizeClass(span->start, 0);
  return
      CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
}
static ALWAYS_INLINE void* do_malloc(size_t size) {
  void* ret = NULL;

  ASSERT(!isForbidden());

  // The following call forces module initialization
  TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
  if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
    Span* span = DoSampledAllocation(size);
    if (span != NULL) {
      ret = SpanToMallocResult(span);
    }
  } else if (size > kMaxSize) {
    // Use page-level allocator
    SpinLockHolder h(&pageheap_lock);
    Span* span = pageheap->New(pages(size));
    if (span != NULL) {
      ret = SpanToMallocResult(span);
    }
  } else {
    // The common case, and also the simplest.  This just pops the
    // size-appropriate freelist, after replenishing it if it's empty.
    ret = CheckedMallocResult(heap->Allocate(size));
  }

  if (ret == NULL) errno = ENOMEM;
  return ret;
}
static ALWAYS_INLINE void do_free(void* ptr) {
  if (ptr == NULL) return;
  ASSERT(pageheap != NULL);  // Should not call free() before malloc()
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  Span* span = NULL;
  size_t cl = pageheap->GetSizeClassIfCached(p);

  if (cl == 0) {
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
  }
  if (cl != 0) {
    ASSERT(!pageheap->GetDescriptor(p)->sample);
    TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
    if (heap != NULL) {
      heap->Deallocate(ptr, cl);
    } else {
      // Delete directly into central cache
      SLL_SetNext(ptr, NULL);
      central_cache[cl].InsertRange(ptr, ptr, 1);
    }
  } else {
    SpinLockHolder h(&pageheap_lock);
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span != NULL && span->start == p);
    if (span->sample) {
      DLL_Remove(span);
      stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
      span->objects = NULL;
    }
    pageheap->Delete(span);
  }
}
// For use by exported routines below that want specific alignments
//
// Note: this code can be slow, and can significantly fragment memory.
// The expectation is that memalign/posix_memalign/valloc/pvalloc will
// not be invoked very often.  This requirement simplifies our
// implementation and allows us to tune for expected allocation
// patterns.
static void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  ASSERT(align > 0);
  if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();

  // Allocate at least one byte to avoid boundary conditions below
  if (size == 0) size = 1;

  if (size <= kMaxSize && align < kPageSize) {
    // Search through acceptable size classes looking for one with
    // enough alignment.  This depends on the fact that
    // InitSizeClasses() currently produces several size classes that
    // are aligned at powers of two.  We will waste time and space if
    // we miss in the size class array, but that is deemed acceptable
    // since memalign() should be used rarely.
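    // (Illustration with made-up sizes: for do_memalign(64, 100) we start at
    // SizeClass(100) and walk upward until class_to_size[cl] is a multiple of
    // 64; if such a class exists the request is served from the thread cache,
    // otherwise we fall through to the page-level path below.)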
    size_t cl = SizeClass(size);
    while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
      cl++;
    }
    if (cl < kNumClasses) {
      TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
      return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
    }
  }

  // We will allocate directly from the page heap
  SpinLockHolder h(&pageheap_lock);

  if (align <= kPageSize) {
    // Any page-level allocation will be fine
    // TODO: We could put the rest of this page in the appropriate
    // TODO: cache but it does not seem worth it.
    Span* span = pageheap->New(pages(size));
    return span == NULL ? NULL : SpanToMallocResult(span);
  }

  // Allocate extra pages and carve off an aligned portion
  const Length alloc = pages(size + align);
  Span* span = pageheap->New(alloc);
  if (span == NULL) return NULL;

  // Skip starting portion so that we end up aligned
  Length skip = 0;
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
    skip++;
  }
  ASSERT(skip < alloc);
  if (skip > 0) {
    Span* rest = pageheap->Split(span, skip);
    pageheap->Delete(span);
    span = rest;
  }

  // Skip trailing portion that we do not need to return
  const Length needed = pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    Span* trailer = pageheap->Split(span, needed);
    pageheap->Delete(trailer);
  }
  return SpanToMallocResult(span);
}
// Helpers for use by exported routines below:

static inline void do_malloc_stats() {
  PrintStats(1);
}

static inline int do_mallopt(int, int) {
  return 1;     // Indicates error
}

#ifdef HAVE_STRUCT_MALLINFO  // mallinfo isn't defined on freebsd, for instance
static inline struct mallinfo do_mallinfo() {
  TCMallocStats stats;
  ExtractStats(&stats, NULL);

  // Just some of the fields are filled in.
  struct mallinfo info;
  memset(&info, 0, sizeof(info));

  // Unfortunately, the struct contains "int" fields, so some of the
  // size values will be truncated.
  info.arena     = static_cast<int>(stats.system_bytes);
  info.fsmblks   = static_cast<int>(stats.thread_bytes
                                    + stats.central_bytes
                                    + stats.transfer_bytes);
  info.fordblks  = static_cast<int>(stats.pageheap_bytes);
  info.uordblks  = static_cast<int>(stats.system_bytes
                                    - stats.thread_bytes
                                    - stats.central_bytes
                                    - stats.transfer_bytes
                                    - stats.pageheap_bytes);

  return info;
}
#endif
//-------------------------------------------------------------------
// Exported routines
//-------------------------------------------------------------------

// CAVEAT: The code structure below ensures that MallocHook methods are always
//         called from the stack frame of the invoked allocation function.
//         heap-checker.cc depends on this to start a stack trace from
//         the call to the (de)allocation function.

void* malloc(size_t size) {
  void* result = do_malloc(size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

void free(void* ptr) {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}
void* calloc(size_t n, size_t elem_size) {
  const size_t totalBytes = n * elem_size;

  // Protect against overflow
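  // (For instance, on a 32-bit build calloc(0x10000, 0x10001) would make
  // n * elem_size wrap around; totalBytes / elem_size then no longer equals n,
  // so we refuse the request instead of returning a too-small block.)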
  if (n > 1 && elem_size && (totalBytes / elem_size) != n)
    return 0;

  void* result = do_malloc(totalBytes);
  if (result != NULL) {
    memset(result, 0, totalBytes);
  }
  MallocHook::InvokeNewHook(result, totalBytes);
  return result;
}
void cfree(void* ptr) {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}
void* realloc(void* old_ptr, size_t new_size) {
  if (old_ptr == NULL) {
    void* result = do_malloc(new_size);
    MallocHook::InvokeNewHook(result, new_size);
    return result;
  }
  if (new_size == 0) {
    MallocHook::InvokeDeleteHook(old_ptr);
    free(old_ptr);
    return NULL;
  }

  // Get the size of the old entry
  const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
  size_t cl = pageheap->GetSizeClassIfCached(p);
  Span *span = NULL;
  size_t old_size;
  if (cl == 0) {
    span = pageheap->GetDescriptor(p);
    cl = span->sizeclass;
    pageheap->CacheSizeClass(p, cl);
  }
  if (cl != 0) {
    old_size = ByteSizeForClass(cl);
  } else {
    ASSERT(span != NULL);
    old_size = span->length << kPageShift;
  }

  // Reallocate if the new size is larger than the old size,
  // or if the new size is significantly smaller than the old size.
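  // (Example of the policy: growing always copies; shrinking copies only when
  // AllocationSize(new_size) is strictly smaller than the old block, so a
  // shrink that stays within the same size class keeps the original pointer.)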
  if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
    // Need to reallocate
    void* new_ptr = do_malloc(new_size);
    if (new_ptr == NULL) {
      return NULL;
    }
    MallocHook::InvokeNewHook(new_ptr, new_size);
    memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
    MallocHook::InvokeDeleteHook(old_ptr);
    // We could use a variant of do_free() that leverages the fact
    // that we already know the sizeclass of old_ptr.  The benefit
    // would be small, so don't bother.
    do_free(old_ptr);
    return new_ptr;
  } else {
    return old_ptr;
  }
}
static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;
static inline void* cpp_alloc(size_t size, bool nothrow) {
  for (;;) {
    void* p = do_malloc(size);
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
      // If no new_handler is established, the allocation failed.
      if (!nh) {
        if (nothrow) return 0;
        throw std::bad_alloc();
      }
      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // if it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
        return p;
      }
    } else {  // allocation success
      return p;
    }
  }
}
void* operator new(size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void* operator new(size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void operator delete(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

void operator delete(void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
void* operator new[](size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void* operator new[](size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

void operator delete[](void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

void operator delete[](void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
extern "C" void* memalign(size_t align, size_t size) __THROW {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
, size_t align
, size_t size
)
3316 if (((align
% sizeof(void*)) != 0) ||
3317 ((align
& (align
- 1)) != 0) ||
3322 void* result
= do_memalign(align
, size
);
3323 MallocHook::InvokeNewHook(result
, size
);
3324 if (result
== NULL
) {
3327 *result_ptr
= result
;
static size_t pagesize = 0;

extern "C" void* valloc(size_t size) __THROW {
  // Allocate page-aligned object of length >= size bytes
  if (pagesize == 0) pagesize = getpagesize();
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
extern "C" void* pvalloc(size_t size) __THROW {
  // Round up size to a multiple of pagesize
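  // (Example: with a 4096-byte page, 1 rounds up to 4096 and 5000 rounds up to
  // 8192; the mask arithmetic below relies on pagesize being a power of two.)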
  if (pagesize == 0) pagesize = getpagesize();
  size = (size + pagesize - 1) & ~(pagesize - 1);
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
extern "C" void malloc_stats(void) {
  do_malloc_stats();
}

extern "C" int mallopt(int cmd, int value) {
  return do_mallopt(cmd, value);
}

#ifdef HAVE_STRUCT_MALLINFO
extern "C" struct mallinfo mallinfo(void) {
  return do_mallinfo();
}
#endif
//-------------------------------------------------------------------
// Some library routines on RedHat 9 allocate memory using malloc()
// and free it using __libc_free() (or vice-versa).  Since we provide
// our own implementations of malloc/free, we need to make sure that
// the __libc_XXX variants (defined as part of glibc) also point to
// the same implementations.
//-------------------------------------------------------------------
#if defined(__GLIBC__)
extern "C" {
# if defined(__GNUC__) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
  // Potentially faster variants that use the gcc alias extension.
  // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
# define ALIAS(x) __attribute__ ((weak, alias (x)))
  void* __libc_malloc(size_t size)              ALIAS("malloc");
  void  __libc_free(void* ptr)                  ALIAS("free");
  void* __libc_realloc(void* ptr, size_t size)  ALIAS("realloc");
  void* __libc_calloc(size_t n, size_t size)    ALIAS("calloc");
  void  __libc_cfree(void* ptr)                 ALIAS("cfree");
  void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
  void* __libc_valloc(size_t size)              ALIAS("valloc");
  void* __libc_pvalloc(size_t size)             ALIAS("pvalloc");
  int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
# undef ALIAS
# else   /* not __GNUC__ */
  // Portable wrappers
  void* __libc_malloc(size_t size)              { return malloc(size);       }
  void  __libc_free(void* ptr)                  { free(ptr);                 }
  void* __libc_realloc(void* ptr, size_t size)  { return realloc(ptr, size); }
  void* __libc_calloc(size_t n, size_t size)    { return calloc(n, size);    }
  void  __libc_cfree(void* ptr)                 { cfree(ptr);                }
  void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
  void* __libc_valloc(size_t size)              { return valloc(size);       }
  void* __libc_pvalloc(size_t size)             { return pvalloc(size);      }
  int __posix_memalign(void** r, size_t a, size_t s) {
    return posix_memalign(r, a, s);
  }
# endif  /* __GNUC__ */
}
#endif   /* __GLIBC__ */
// Override __libc_memalign in libc on linux boxes specially.
// They have a bug in libc that causes them to (very rarely) allocate
// with __libc_memalign() yet deallocate with free() and the
// definitions above don't catch it.
// This function is an exception to the rule of calling MallocHook method
// from the stack frame of the allocation function;
// heap-checker handles this special case explicitly.
static void *MemalignOverride(size_t align, size_t size, const void *caller)
    __THROW {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
#include <wtf/HashSet.h>

class FreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    HashSet<void*> m_freeObjects;

public:
    FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }

    void visit(void* ptr) { m_freeObjects.add(ptr); }
    bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
    size_t freeObjectCount() const { return m_freeObjects.size(); }

    void findFreeObjects(TCMalloc_ThreadCache* threadCache)
    {
        for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
            threadCache->enumerateFreeObjects(*this, m_reader);
    }

    void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes)
    {
        for (unsigned i = 0; i < numSizes; i++)
            centralFreeList[i].enumerateFreeObjects(*this, m_reader);
    }
};
class PageMapFreeObjectFinder {
    const RemoteMemoryReader& m_reader;
    FreeObjectFinder& m_freeObjectFinder;

public:
    PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
        : m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    int visit(void* ptr) const
    {
        if (!ptr)
            return 1;

        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (span->free) {
            void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
            m_freeObjectFinder.visit(ptr);
        } else if (span->sizeclass) {
            // Walk the free list of the small-object span, keeping track of each object seen
            for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject)))
                m_freeObjectFinder.visit(nextObject);
        }
        return span->length;
    }
};
class PageMapMemoryUsageRecorder {
    task_t m_task;
    void* m_context;
    unsigned m_typeMask;
    vm_range_recorder_t* m_recorder;
    const RemoteMemoryReader& m_reader;
    const FreeObjectFinder& m_freeObjectFinder;
    mutable HashSet<void*> m_seenPointers;

public:
    PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
        : m_task(task)
        , m_context(context)
        , m_typeMask(typeMask)
        , m_recorder(recorder)
        , m_reader(reader)
        , m_freeObjectFinder(freeObjectFinder)
    { }

    int visit(void* ptr) const
    {
        if (!ptr)
            return 1;

        Span* span = m_reader(reinterpret_cast<Span*>(ptr));
        if (m_seenPointers.contains(ptr))
            return span->length;
        m_seenPointers.add(ptr);

        // Mark the memory used for the Span itself as an administrative region
        vm_range_t ptrRange = { reinterpret_cast<vm_address_t>(ptr), sizeof(Span) };
        if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
            (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, &ptrRange, 1);

        ptrRange.address = span->start << kPageShift;
        ptrRange.size = span->length * kPageSize;

        // Mark the memory region the span represents as candidates for containing pointers
        if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
            (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);

        if (!span->free && (m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
            // If it's an allocated large object span, mark it as in use
            if (span->sizeclass == 0 && !m_freeObjectFinder.isFreeObject(reinterpret_cast<void*>(ptrRange.address)))
                (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &ptrRange, 1);
            else if (span->sizeclass) {
                const size_t byteSize = ByteSizeForClass(span->sizeclass);
                unsigned totalObjects = (span->length << kPageShift) / byteSize;
                ASSERT(span->refcount <= totalObjects);
                char* ptr = reinterpret_cast<char*>(span->start << kPageShift);

                // Mark each allocated small object within the span as in use
                for (unsigned i = 0; i < totalObjects; i++) {
                    char* thisObject = ptr + (i * byteSize);
                    if (m_freeObjectFinder.isFreeObject(thisObject))
                        continue;

                    vm_range_t objectRange = { reinterpret_cast<vm_address_t>(thisObject), byteSize };
                    (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &objectRange, 1);
                }
            }
        }

        return span->length;
    }
};
kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
{
    RemoteMemoryReader memoryReader(task, reader);

    FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
    TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
    TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
    TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);

    TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);

    FreeObjectFinder finder(memoryReader);
    finder.findFreeObjects(threadHeaps);
    finder.findFreeObjects(centralCaches, kNumClasses);

    TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
    PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
    pageMap->visit(pageMapFinder, memoryReader);

    PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
    pageMap->visit(usageRecorder, memoryReader);

    return 0;
}
size_t FastMallocZone::size(malloc_zone_t*, const void*)
{
    return 0;
}

void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
{
    return 0;
}

void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
{
    return 0;
}

void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
{
    // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
    // is not in this zone.  When this happens, the pointer being freed was not allocated by any
    // zone so we need to print a useful error for the application developer.
    malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
}

void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
{
    return 0;
}
extern "C" {
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
    &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics };
}

FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches)
    : m_pageHeap(pageHeap)
    , m_threadHeaps(threadHeaps)
    , m_centralCaches(centralCaches)
{
    memset(&m_zone, 0, sizeof(m_zone));
    m_zone.zone_name = "JavaScriptCore FastMalloc";
    m_zone.size = &FastMallocZone::size;
    m_zone.malloc = &FastMallocZone::zoneMalloc;
    m_zone.calloc = &FastMallocZone::zoneCalloc;
    m_zone.realloc = &FastMallocZone::zoneRealloc;
    m_zone.free = &FastMallocZone::zoneFree;
    m_zone.valloc = &FastMallocZone::zoneValloc;
    m_zone.destroy = &FastMallocZone::zoneDestroy;
    m_zone.introspect = &jscore_fastmalloc_introspection;
    malloc_zone_register(&m_zone);
}
void FastMallocZone::init()
{
    static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache));
}
void releaseFastMallocFreeMemory()
{
    SpinLockHolder h(&pageheap_lock);
    pageheap->ReleaseFreePages();
}

#endif

#endif // USE_SYSTEM_MALLOC