1// Copyright (c) 2005, 2007, Google Inc.
2// All rights reserved.
 3// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31// ---
32// Author: Sanjay Ghemawat <opensource@google.com>
33//
34// A malloc that uses a per-thread cache to satisfy small malloc requests.
35// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
36//
37// See doc/tcmalloc.html for a high-level
38// description of how this malloc works.
39//
40// SYNCHRONIZATION
41// 1. The thread-specific lists are accessed without acquiring any locks.
42// This is safe because each such list is only accessed by one thread.
43// 2. We have a lock per central free-list, and hold it while manipulating
44// the central free list for a particular size.
45// 3. The central page allocator is protected by "pageheap_lock".
46// 4. The pagemap (which maps from page-number to descriptor),
47// can be read without holding any locks, and written while holding
48// the "pageheap_lock".
49// 5. To improve performance, a subset of the information one can get
50// from the pagemap is cached in a data structure, pagemap_cache_,
51// that atomically reads and writes its entries. This cache can be
52// read and written without locking.
53//
54// This multi-threaded access to the pagemap is safe for fairly
55// subtle reasons. We basically assume that when an object X is
56// allocated by thread A and deallocated by thread B, there must
57// have been appropriate synchronization in the handoff of object
58// X from thread A to thread B. The same logic applies to pagemap_cache_.
59//
60// THE PAGEID-TO-SIZECLASS CACHE
61// Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
62// returns 0 for a particular PageID then that means "no information," not that
63// the sizeclass is 0. The cache may have stale information for pages that do
64// not hold the beginning of any free()'able object. Staleness is eliminated
65// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
66// do_memalign() for all other relevant pages.
67//
68// TODO: Bias reclamation to larger addresses
69// TODO: implement mallinfo/mallopt
70// TODO: Better testing
71//
72// 9/28/2003 (new page-level allocator replaces ptmalloc2):
73// * malloc/free of small objects goes from ~300 ns to ~50 ns.
74// * allocation of a reasonably complicated struct
75// goes from about 1100 ns to about 300 ns.
76
77#include "config.h"
78#include "FastMalloc.h"
79
80#include "Assertions.h"
 81#include <limits>
 82#if ENABLE(JSC_MULTIPLE_THREADS)
83#include <pthread.h>
84#endif
85
86#ifndef NO_TCMALLOC_SAMPLES
87#ifdef WTF_CHANGES
88#define NO_TCMALLOC_SAMPLES
89#endif
90#endif
91
 92#if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC) && defined(NDEBUG)
93#define FORCE_SYSTEM_MALLOC 0
94#else
95#define FORCE_SYSTEM_MALLOC 1
96#endif
97
98// Use a background thread to periodically scavenge memory to release back to the system
99// https://bugs.webkit.org/show_bug.cgi?id=27900: don't turn this on for Tiger until we have figured out why it caused a crash.
100#define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 0
101
102#ifndef NDEBUG
103namespace WTF {
104
105#if ENABLE(JSC_MULTIPLE_THREADS)
106static pthread_key_t isForbiddenKey;
107static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
108static void initializeIsForbiddenKey()
109{
110 pthread_key_create(&isForbiddenKey, 0);
111}
112
113#if !ASSERT_DISABLED
114static bool isForbidden()
115{
116 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
117 return !!pthread_getspecific(isForbiddenKey);
118}
119#endif
120
121void fastMallocForbid()
122{
123 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
124 pthread_setspecific(isForbiddenKey, &isForbiddenKey);
125}
126
127void fastMallocAllow()
128{
129 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
130 pthread_setspecific(isForbiddenKey, 0);
131}
132
133#else
134
135static bool staticIsForbidden;
136static bool isForbidden()
137{
138 return staticIsForbidden;
139}
140
141void fastMallocForbid()
142{
143 staticIsForbidden = true;
144}
145
146void fastMallocAllow()
147{
148 staticIsForbidden = false;
149}
150#endif // ENABLE(JSC_MULTIPLE_THREADS)
151
152} // namespace WTF
153#endif // NDEBUG
154
155#include <string.h>
156
157namespace WTF {
158
159#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
160
161namespace Internal {
162
163void fastMallocMatchFailed(void*)
164{
165 CRASH();
166}
167
168} // namespace Internal
169
170#endif
171
172void* fastZeroedMalloc(size_t n)
173{
174 void* result = fastMalloc(n);
175 memset(result, 0, n);
176 return result;
177}
178
179char* fastStrDup(const char* src)
180{
181 int len = strlen(src) + 1;
182 char* dup = static_cast<char*>(fastMalloc(len));
183
184 if (dup)
185 memcpy(dup, src, len);
186
187 return dup;
188}
189
190TryMallocReturnValue tryFastZeroedMalloc(size_t n)
191{
192 void* result;
193 if (!tryFastMalloc(n).getValue(result))
194 return 0;
195 memset(result, 0, n);
196 return result;
197}
198
199} // namespace WTF
200
201#if FORCE_SYSTEM_MALLOC
202
203namespace WTF {
204
205TryMallocReturnValue tryFastMalloc(size_t n)
206{
207 ASSERT(!isForbidden());
208
209#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
210 if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
211 return 0;
212
213 void* result = malloc(n + sizeof(AllocAlignmentInteger));
214 if (!result)
215 return 0;
216
217 *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
218 result = static_cast<AllocAlignmentInteger*>(result) + 1;
219
220 return result;
221#else
222 return malloc(n);
223#endif
224}
225
226void* fastMalloc(size_t n)
227{
228 ASSERT(!isForbidden());
229
230#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
231 TryMallocReturnValue returnValue = tryFastMalloc(n);
232 void* result;
233 returnValue.getValue(result);
234#else
235 void* result = malloc(n);
236#endif
237
238 if (!result)
239 CRASH();
240 return result;
241}
242
243TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size)
244{
245 ASSERT(!isForbidden());
246
247#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
248 size_t totalBytes = n_elements * element_size;
249 if (n_elements > 1 && element_size && (totalBytes / element_size) != n_elements || (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes))
250 return 0;
251
252 totalBytes += sizeof(AllocAlignmentInteger);
253 void* result = malloc(totalBytes);
254 if (!result)
255 return 0;
256
257 memset(result, 0, totalBytes);
258 *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
259 result = static_cast<AllocAlignmentInteger*>(result) + 1;
260 return result;
261#else
262 return calloc(n_elements, element_size);
263#endif
264}
265
266void* fastCalloc(size_t n_elements, size_t element_size)
267{
268 ASSERT(!isForbidden());
269
270#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
271 TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size);
272 void* result;
273 returnValue.getValue(result);
274#else
275 void* result = calloc(n_elements, element_size);
276#endif
277
278 if (!result)
279 CRASH();
280 return result;
281}
282
283void fastFree(void* p)
284{
285 ASSERT(!isForbidden());
286
287#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
288 if (!p)
289 return;
290
291 AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
292 if (*header != Internal::AllocTypeMalloc)
293 Internal::fastMallocMatchFailed(p);
294 free(header);
295#else
296 free(p);
297#endif
298}
299
300TryMallocReturnValue tryFastRealloc(void* p, size_t n)
301{
302 ASSERT(!isForbidden());
303
304#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
305 if (p) {
306 if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
307 return 0;
308 AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
309 if (*header != Internal::AllocTypeMalloc)
310 Internal::fastMallocMatchFailed(p);
311 void* result = realloc(header, n + sizeof(AllocAlignmentInteger));
312 if (!result)
313 return 0;
314
315 // This should not be needed because the value is already there:
316 // *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
317 result = static_cast<AllocAlignmentInteger*>(result) + 1;
318 return result;
319 } else {
320 return fastMalloc(n);
321 }
322#else
323 return realloc(p, n);
324#endif
325}
326
327void* fastRealloc(void* p, size_t n)
328{
329 ASSERT(!isForbidden());
330
331#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
332 TryMallocReturnValue returnValue = tryFastRealloc(p, n);
333 void* result;
334 returnValue.getValue(result);
335#else
336 void* result = realloc(p, n);
337#endif
338
339 if (!result)
340 CRASH();
341 return result;
342}
343
344void releaseFastMallocFreeMemory() { }
345
346FastMallocStatistics fastMallocStatistics()
347{
348 FastMallocStatistics statistics = { 0, 0, 0, 0 };
349 return statistics;
350}
351
352} // namespace WTF
353
354#if OS(DARWIN)
355// This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
356// It will never be used in this case, so its type and value are less interesting than its presence.
357extern "C" const int jscore_fastmalloc_introspection = 0;
358#endif
359
360#else // FORCE_SYSTEM_MALLOC
361
362#if HAVE(STDINT_H)
363#include <stdint.h>
364#elif HAVE(INTTYPES_H)
365#include <inttypes.h>
366#else
367#include <sys/types.h>
368#endif
369
370#include "AlwaysInline.h"
371#include "Assertions.h"
372#include "TCPackedCache.h"
373#include "TCPageMap.h"
374#include "TCSpinLock.h"
375#include "TCSystemAlloc.h"
376#include <algorithm>
377#include <errno.h>
378#include <limits>
379#include <new>
380#include <pthread.h>
381#include <stdarg.h>
382#include <stddef.h>
383#include <stdio.h>
384#if OS(UNIX)
385#include <unistd.h>
386#endif
387#if COMPILER(MSVC)
388#ifndef WIN32_LEAN_AND_MEAN
389#define WIN32_LEAN_AND_MEAN
390#endif
391#include <windows.h>
392#endif
393
394#if WTF_CHANGES
395
396#if OS(DARWIN)
b37bf2e1 397#include "MallocZoneSupport.h"
398#include <wtf/HashSet.h>
399#include <wtf/Vector.h>
400#endif
401#if HAVE(DISPATCH_H)
402#include <dispatch/dispatch.h>
403#endif
404
405
406#ifndef PRIuS
407#define PRIuS "zu"
408#endif
409
410// Calling pthread_getspecific through a global function pointer is faster than a normal
411// call to the function on Mac OS X, and it's used in performance-critical code. So we
412// use a function pointer. But that's not necessarily faster on other platforms, and we had
413// problems with this technique on Windows, so we'll do this only on Mac OS X.
414#if OS(DARWIN)
415static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
416#define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
417#endif
418
419#define DEFINE_VARIABLE(type, name, value, meaning) \
420 namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
421 type FLAGS_##name(value); \
422 char FLAGS_no##name; \
423 } \
424 using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
425
426#define DEFINE_int64(name, value, meaning) \
427 DEFINE_VARIABLE(int64_t, name, value, meaning)
428
429#define DEFINE_double(name, value, meaning) \
430 DEFINE_VARIABLE(double, name, value, meaning)
431
432namespace WTF {
433
434#define malloc fastMalloc
435#define calloc fastCalloc
436#define free fastFree
437#define realloc fastRealloc
438
439#define MESSAGE LOG_ERROR
440#define CHECK_CONDITION ASSERT
441
442#if OS(DARWIN)
443class Span;
444class TCMalloc_Central_FreeListPadded;
445class TCMalloc_PageHeap;
446class TCMalloc_ThreadCache;
447template <typename T> class PageHeapAllocator;
448
449class FastMallocZone {
450public:
451 static void init();
452
453 static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
454 static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
455 static boolean_t check(malloc_zone_t*) { return true; }
456 static void print(malloc_zone_t*, boolean_t) { }
457 static void log(malloc_zone_t*, void*) { }
458 static void forceLock(malloc_zone_t*) { }
459 static void forceUnlock(malloc_zone_t*) { }
460 static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }
461
462private:
463 FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
464 static size_t size(malloc_zone_t*, const void*);
465 static void* zoneMalloc(malloc_zone_t*, size_t);
466 static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
467 static void zoneFree(malloc_zone_t*, void*);
468 static void* zoneRealloc(malloc_zone_t*, void*, size_t);
469 static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
470 static void zoneDestroy(malloc_zone_t*) { }
471
472 malloc_zone_t m_zone;
473 TCMalloc_PageHeap* m_pageHeap;
474 TCMalloc_ThreadCache** m_threadHeaps;
475 TCMalloc_Central_FreeListPadded* m_centralCaches;
476 PageHeapAllocator<Span>* m_spanAllocator;
477 PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
478};
479
480#endif
481
482#endif
483
484#ifndef WTF_CHANGES
485// This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
486// you're porting to a system where you really can't get a stacktrace.
487#ifdef NO_TCMALLOC_SAMPLES
488// We use #define so code compiles even if you #include stacktrace.h somehow.
489# define GetStackTrace(stack, depth, skip) (0)
490#else
491# include <google/stacktrace.h>
492#endif
493#endif
494
495// Even if we have support for thread-local storage in the compiler
496// and linker, the OS may not support it. We need to check that at
497// runtime. Right now, we have to keep a manual set of "bad" OSes.
498#if defined(HAVE_TLS)
499 static bool kernel_supports_tls = false; // be conservative
500 static inline bool KernelSupportsTLS() {
501 return kernel_supports_tls;
502 }
503# if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS
504 static void CheckIfKernelSupportsTLS() {
505 kernel_supports_tls = false;
506 }
507# else
508# include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too
509 static void CheckIfKernelSupportsTLS() {
510 struct utsname buf;
511 if (uname(&buf) != 0) { // should be impossible
512 MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
513 kernel_supports_tls = false;
514 } else if (strcasecmp(buf.sysname, "linux") == 0) {
515 // The linux case: the first kernel to support TLS was 2.6.0
516 if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x
517 kernel_supports_tls = false;
518 else if (buf.release[0] == '2' && buf.release[1] == '.' &&
519 buf.release[2] >= '0' && buf.release[2] < '6' &&
520 buf.release[3] == '.') // 2.0 - 2.5
521 kernel_supports_tls = false;
522 else
523 kernel_supports_tls = true;
524 } else { // some other kernel, we'll be optimistic
525 kernel_supports_tls = true;
526 }
527 // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
528 }
529# endif // HAVE_DECL_UNAME
530#endif // HAVE_TLS
531
532// __THROW is defined in glibc systems. It means, counter-intuitively,
533// "This function will never throw an exception." It's an optional
534// optimization tool, but we may need to use it to match glibc prototypes.
535#ifndef __THROW // I guess we're not on a glibc system
536# define __THROW // __THROW is just an optimization, so ok to make it ""
537#endif
538
539//-------------------------------------------------------------------
540// Configuration
541//-------------------------------------------------------------------
542
543// Not all possible combinations of the following parameters make
544// sense. In particular, if kMaxSize increases, you may have to
545// increase kNumClasses as well.
546static const size_t kPageShift = 12;
547static const size_t kPageSize = 1 << kPageShift;
548static const size_t kMaxSize = 8u * kPageSize;
549static const size_t kAlignShift = 3;
550static const size_t kAlignment = 1 << kAlignShift;
551static const size_t kNumClasses = 68;
552
553// Allocates a big block of memory for the pagemap once we reach more than
554// 128MB
555static const size_t kPageMapBigAllocationThreshold = 128 << 20;
556
557// Minimum number of pages to fetch from system at a time. Must be
558// significantly bigger than kPageSize to amortize system-call
559// overhead, and also to reduce external fragmentation. Also, we
560// should keep this value big because various incarnations of Linux
561// have small limits on the number of mmap() regions per
562// address-space.
563static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
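// Worked example of the derived configuration values (an illustrative sketch,
// compiled out; it exercises only the constants defined above):
#if 0
COMPILE_ASSERT(kPageSize == 4096, examplePageSize);                 // 1 << 12
COMPILE_ASSERT(kMaxSize == 8 * 4096, exampleMaxSmallObjectSize);    // 8 pages = 32768 bytes
COMPILE_ASSERT(kAlignment == 8, exampleAlignment);                  // 1 << 3
COMPILE_ASSERT(kMinSystemAlloc == 256, exampleMinSystemAllocPages); // 256 pages = 1MB
#endif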
564
565// Number of objects to move between a per-thread list and a central
566// list in one shot. We want this to be not too small so we can
567// amortize the lock overhead for accessing the central list. Making
568// it too big may temporarily cause unnecessary memory wastage in the
569// per-thread free list until the scavenger cleans up the list.
570static int num_objects_to_move[kNumClasses];
571
572// Maximum length we allow a per-thread free-list to have before we
573// move objects from it into the corresponding central free-list. We
574// want this big to avoid locking the central free-list too often. It
575// should not hurt to make this list somewhat big because the
576// scavenging code will shrink it down when its contents are not in use.
577static const int kMaxFreeListLength = 256;
578
579// Lower and upper bounds on the per-thread cache sizes
580static const size_t kMinThreadCacheSize = kMaxSize * 2;
581static const size_t kMaxThreadCacheSize = 512 * 1024;
582
583// Default bound on the total amount of thread caches
584static const size_t kDefaultOverallThreadCacheSize = 16 << 20;
585
586// For all span-lengths < kMaxPages we keep an exact-size list.
587// REQUIRED: kMaxPages >= kMinSystemAlloc;
588static const size_t kMaxPages = kMinSystemAlloc;
589
590/* The smallest prime > 2^n */
591static int primes_list[] = {
592 // Small values might cause high rates of sampling
593 // and hence commented out.
594 // 2, 5, 11, 17, 37, 67, 131, 257,
595 // 521, 1031, 2053, 4099, 8209, 16411,
596 32771, 65537, 131101, 262147, 524309, 1048583,
597 2097169, 4194319, 8388617, 16777259, 33554467 };
598
599// Twice the approximate gap between sampling actions.
600// I.e., we take one sample approximately once every
601// tcmalloc_sample_parameter/2
602// bytes of allocation, i.e., ~ once every 128KB.
603// Must be a prime number.
604#ifdef NO_TCMALLOC_SAMPLES
605DEFINE_int64(tcmalloc_sample_parameter, 0,
606 "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
607static size_t sample_period = 0;
608#else
609DEFINE_int64(tcmalloc_sample_parameter, 262147,
610 "Twice the approximate gap between sampling actions."
611 " Must be a prime number. Otherwise will be rounded up to a "
612 " larger prime number");
613static size_t sample_period = 262147;
614#endif
615
616// Protects sample_period above
617static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
618
619// Parameters for controlling how fast memory is returned to the OS.
620
621DEFINE_double(tcmalloc_release_rate, 1,
622 "Rate at which we release unused memory to the system. "
623 "Zero means we never release memory back to the system. "
624 "Increase this flag to return memory faster; decrease it "
625 "to return memory slower. Reasonable rates are in the "
626 "range [0,10]");
627
628//-------------------------------------------------------------------
629// Mapping from size to size_class and vice versa
630//-------------------------------------------------------------------
631
632// Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
633// array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
634// So for these larger sizes we have an array indexed by ceil(size/128).
635//
636// We flatten both logical arrays into one physical array and use
637// arithmetic to compute an appropriate index. The constants used by
638// ClassIndex() were selected to make the flattening work.
639//
640// Examples:
641// Size Expression Index
642// -------------------------------------------------------
643// 0 (0 + 7) / 8 0
644// 1 (1 + 7) / 8 1
645// ...
646// 1024 (1024 + 7) / 8 128
647// 1025 (1025 + 127 + (120<<7)) / 128 129
648// ...
649// 32768 (32768 + 127 + (120<<7)) / 128 376
650static const size_t kMaxSmallSize = 1024;
651static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128
652static const int add_amount[2] = { 7, 127 + (120 << 7) };
653static unsigned char class_array[377];
654
655// Compute index of the class_array[] entry for a given size
656static inline int ClassIndex(size_t s) {
657 const int i = (s > kMaxSmallSize);
658 return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
659}
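// A quick check of the flattened-index arithmetic above, reproducing the rows
// of the example table (illustrative sketch, compiled out; it uses only
// ClassIndex() and ASSERT() as defined/included above):
#if 0
static void ClassIndexExamples() {
  ASSERT(ClassIndex(0) == 0);        // (0 + 7) >> 3
  ASSERT(ClassIndex(1024) == 128);   // (1024 + 7) >> 3
  ASSERT(ClassIndex(1025) == 129);   // (1025 + 127 + (120 << 7)) >> 7
  ASSERT(ClassIndex(32768) == 376);  // last valid entry of class_array[377]
}
#endif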
660
661// Mapping from size class to max size storable in that class
662static size_t class_to_size[kNumClasses];
663
664// Mapping from size class to number of pages to allocate at a time
665static size_t class_to_pages[kNumClasses];
666
667// TransferCache is used to cache transfers of num_objects_to_move[size_class]
668// back and forth between thread caches and the central cache for a given size
669// class.
670struct TCEntry {
671 void *head; // Head of chain of objects.
672 void *tail; // Tail of chain of objects.
673};
674// A central cache freelist can have anywhere from 0 to kNumTransferEntries
675// slots to put linked-list chains into. To keep memory usage bounded, the total
676// number of TCEntries across size classes is fixed. Currently each size
677// class is initially given one TCEntry which also means that the maximum any
678// one class can have is kNumClasses.
679static const int kNumTransferEntries = kNumClasses;
680
681// Note: the following only works for "n"s that fit in 32-bits, but
682// that is fine since we only use it for small sizes.
683static inline int LgFloor(size_t n) {
684 int log = 0;
685 for (int i = 4; i >= 0; --i) {
686 int shift = (1 << i);
687 size_t x = n >> shift;
688 if (x != 0) {
689 n = x;
690 log += shift;
691 }
692 }
693 ASSERT(n == 1);
694 return log;
695}
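// A few concrete values for LgFloor() (illustrative sketch, compiled out):
#if 0
static void LgFloorExamples() {
  ASSERT(LgFloor(1) == 0);
  ASSERT(LgFloor(kPageSize) == 12);      // 4096 is exactly 2^12
  ASSERT(LgFloor(kPageSize + 1) == 12);  // rounds down, not up
  ASSERT(LgFloor(kMaxSize) == 15);       // 32768 = 2^15
}
#endif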
696
697// Some very basic linked list functions for dealing with using void * as
698// storage.
699
700static inline void *SLL_Next(void *t) {
701 return *(reinterpret_cast<void**>(t));
702}
703
704static inline void SLL_SetNext(void *t, void *n) {
705 *(reinterpret_cast<void**>(t)) = n;
706}
707
708static inline void SLL_Push(void **list, void *element) {
709 SLL_SetNext(element, *list);
710 *list = element;
711}
712
713static inline void *SLL_Pop(void **list) {
714 void *result = *list;
715 *list = SLL_Next(*list);
716 return result;
717}
718
719
720// Remove N elements from a linked list to which head points. head will be
721// modified to point to the new head. start and end will point to the first
722// and last nodes of the range. Note that end will point to NULL after this
723// function is called.
724static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
725 if (N == 0) {
726 *start = NULL;
727 *end = NULL;
728 return;
729 }
730
731 void *tmp = *head;
732 for (int i = 1; i < N; ++i) {
733 tmp = SLL_Next(tmp);
734 }
735
736 *start = *head;
737 *end = tmp;
738 *head = SLL_Next(tmp);
739 // Unlink range from list.
740 SLL_SetNext(tmp, NULL);
741}
742
743static inline void SLL_PushRange(void **head, void *start, void *end) {
744 if (!start) return;
745 SLL_SetNext(end, *head);
746 *head = start;
747}
748
749static inline size_t SLL_Size(void *head) {
750 int count = 0;
751 while (head) {
752 count++;
753 head = SLL_Next(head);
754 }
755 return count;
756}
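// Minimal usage sketch for the singly-linked "void*" list helpers above
// (illustrative only, compiled out; any chunk of at least sizeof(void*)
// bytes can serve as a node):
#if 0
static void SLL_Examples() {
  void* list = NULL;
  void* nodeA[1];
  void* nodeB[1];
  SLL_Push(&list, nodeA);            // list: A
  SLL_Push(&list, nodeB);            // list: B -> A
  ASSERT(SLL_Size(list) == 2);
  ASSERT(SLL_Pop(&list) == nodeB);   // LIFO order
  ASSERT(SLL_Size(list) == 1);
}
#endif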
757
758// Setup helper functions.
759
760static ALWAYS_INLINE size_t SizeClass(size_t size) {
761 return class_array[ClassIndex(size)];
762}
763
764// Get the byte-size for a specified class
765static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
766 return class_to_size[cl];
767}
768static int NumMoveSize(size_t size) {
769 if (size == 0) return 0;
770 // Use approx 64k transfers between thread and central caches.
771 int num = static_cast<int>(64.0 * 1024.0 / size);
772 if (num < 2) num = 2;
773 // Clamp well below kMaxFreeListLength to avoid ping pong between central
774 // and thread caches.
775 if (num > static_cast<int>(0.8 * kMaxFreeListLength))
776 num = static_cast<int>(0.8 * kMaxFreeListLength);
777
778 // Also, avoid bringing in too many objects into small object free
779 // lists. There are lots of such lists, and if we allow each one to
780 // fetch too many at a time, we end up having to scavenge too often
781 // (especially when there are lots of threads and each thread gets a
782 // small allowance for its thread cache).
783 //
784 // TODO: Make thread cache free list sizes dynamic so that we do not
785 // have to equally divide a fixed resource amongst lots of threads.
786 if (num > 32) num = 32;
787
788 return num;
789}
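// Worked examples for NumMoveSize() (illustrative sketch, compiled out):
#if 0
static void NumMoveSizeExamples() {
  ASSERT(NumMoveSize(8) == 32);        // 64KB / 8 = 8192, capped at 32
  ASSERT(NumMoveSize(4096) == 16);     // 64KB / 4KB = 16, no capping needed
  ASSERT(NumMoveSize(kMaxSize) == 2);  // 64KB / 32KB = 2, the lower bound
}
#endif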
790
791// Initialize the mapping arrays
792static void InitSizeClasses() {
793 // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
794 if (ClassIndex(0) < 0) {
795 MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
796 CRASH();
797 }
798 if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
799 MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
800 CRASH();
801 }
802
803 // Compute the size classes we want to use
804 size_t sc = 1; // Next size class to assign
805 unsigned char alignshift = kAlignShift;
806 int last_lg = -1;
807 for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
808 int lg = LgFloor(size);
809 if (lg > last_lg) {
810 // Increase alignment every so often.
811 //
812 // Since we double the alignment every time size doubles and
813 // size >= 128, this means that space wasted due to alignment is
814 // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
815 // bytes, so the space wasted as a percentage starts falling for
816 // sizes > 2K.
817 if ((lg >= 7) && (alignshift < 8)) {
818 alignshift++;
819 }
820 last_lg = lg;
821 }
822
823 // Allocate enough pages so leftover is less than 1/8 of total.
824 // This bounds wasted space to at most 12.5%.
825 size_t psize = kPageSize;
826 while ((psize % size) > (psize >> 3)) {
827 psize += kPageSize;
828 }
829 const size_t my_pages = psize >> kPageShift;
830
831 if (sc > 1 && my_pages == class_to_pages[sc-1]) {
832 // See if we can merge this into the previous class without
833 // increasing the fragmentation of the previous class.
834 const size_t my_objects = (my_pages << kPageShift) / size;
835 const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
836 / class_to_size[sc-1];
837 if (my_objects == prev_objects) {
838 // Adjust last class to include this size
839 class_to_size[sc-1] = size;
840 continue;
841 }
842 }
843
844 // Add new class
845 class_to_pages[sc] = my_pages;
846 class_to_size[sc] = size;
847 sc++;
848 }
849 if (sc != kNumClasses) {
850 MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
851 sc, int(kNumClasses));
852 CRASH();
853 }
854
855 // Initialize the mapping arrays
856 int next_size = 0;
857 for (unsigned char c = 1; c < kNumClasses; c++) {
858 const size_t max_size_in_class = class_to_size[c];
859 for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
860 class_array[ClassIndex(s)] = c;
861 }
862 next_size = static_cast<int>(max_size_in_class + kAlignment);
863 }
864
865 // Double-check sizes just to be safe
866 for (size_t size = 0; size <= kMaxSize; size++) {
867 const size_t sc = SizeClass(size);
868 if (sc == 0) {
869 MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
870 CRASH();
871 }
872 if (sc > 1 && size <= class_to_size[sc-1]) {
873 MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
874 "\n", sc, size);
875 CRASH();
876 }
877 if (sc >= kNumClasses) {
878 MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
879 CRASH();
880 }
881 const size_t s = class_to_size[sc];
882 if (size > s) {
883 MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
884 CRASH();
885 }
886 if (s == 0) {
887 MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
888 CRASH();
889 }
890 }
891
892 // Initialize the num_objects_to_move array.
893 for (size_t cl = 1; cl < kNumClasses; ++cl) {
894 num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
895 }
896
897#ifndef WTF_CHANGES
898 if (false) {
899 // Dump class sizes and maximum external wastage per size class
900 for (size_t cl = 1; cl < kNumClasses; ++cl) {
901 const int alloc_size = class_to_pages[cl] << kPageShift;
902 const int alloc_objs = alloc_size / class_to_size[cl];
903 const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
904 const int max_waste = alloc_size - min_used;
905 MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
906 int(cl),
907 int(class_to_size[cl-1] + 1),
908 int(class_to_size[cl]),
909 int(class_to_pages[cl] << kPageShift),
910 max_waste * 100.0 / alloc_size
911 );
912 }
913 }
914#endif
915}
916
917// -------------------------------------------------------------------------
918// Simple allocator for objects of a specified type. External locking
919// is required before accessing one of these objects.
920// -------------------------------------------------------------------------
921
922// Metadata allocator -- keeps stats about how many bytes allocated
923static uint64_t metadata_system_bytes = 0;
924static void* MetaDataAlloc(size_t bytes) {
925 void* result = TCMalloc_SystemAlloc(bytes, 0);
926 if (result != NULL) {
927 metadata_system_bytes += bytes;
928 }
929 return result;
930}
931
932template <class T>
933class PageHeapAllocator {
934 private:
935 // How much to allocate from system at a time
936 static const size_t kAllocIncrement = 32 << 10;
937
938 // Aligned size of T
939 static const size_t kAlignedSize
940 = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
941
942 // Free area from which to carve new objects
943 char* free_area_;
944 size_t free_avail_;
945
946 // Linked list of all regions allocated by this allocator
947 void* allocated_regions_;
948
949 // Free list of already carved objects
950 void* free_list_;
951
952 // Number of allocated but unfreed objects
953 int inuse_;
954
955 public:
956 void Init() {
957 ASSERT(kAlignedSize <= kAllocIncrement);
958 inuse_ = 0;
959 allocated_regions_ = 0;
960 free_area_ = NULL;
961 free_avail_ = 0;
962 free_list_ = NULL;
963 }
964
965 T* New() {
966 // Consult free list
967 void* result;
968 if (free_list_ != NULL) {
969 result = free_list_;
970 free_list_ = *(reinterpret_cast<void**>(result));
971 } else {
972 if (free_avail_ < kAlignedSize) {
973 // Need more room
974 char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
975 if (!new_allocation)
976 CRASH();
977
978 *(void**)new_allocation = allocated_regions_;
979 allocated_regions_ = new_allocation;
980 free_area_ = new_allocation + kAlignedSize;
981 free_avail_ = kAllocIncrement - kAlignedSize;
982 }
983 result = free_area_;
984 free_area_ += kAlignedSize;
985 free_avail_ -= kAlignedSize;
986 }
987 inuse_++;
988 return reinterpret_cast<T*>(result);
989 }
990
991 void Delete(T* p) {
992 *(reinterpret_cast<void**>(p)) = free_list_;
993 free_list_ = p;
994 inuse_--;
995 }
996
997 int inuse() const { return inuse_; }
998
999#if defined(WTF_CHANGES) && OS(DARWIN)
1000 template <class Recorder>
1001 void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
1002 {
1003 vm_address_t adminAllocation = reinterpret_cast<vm_address_t>(allocated_regions_);
1004 while (adminAllocation) {
1005 recorder.recordRegion(adminAllocation, kAllocIncrement);
1006 adminAllocation = *reader(reinterpret_cast<vm_address_t*>(adminAllocation));
1007 }
1008 }
1009#endif
1010};
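// Minimal usage sketch for PageHeapAllocator (illustrative only, compiled out;
// ExampleNode is a hypothetical type, and the external locking described above
// still applies):
#if 0
struct ExampleNode { void* payload; };
static PageHeapAllocator<ExampleNode> example_allocator;
static void PageHeapAllocatorExample() {
  example_allocator.Init();                  // one-time setup
  ExampleNode* n = example_allocator.New();  // carve (or reuse) one aligned object
  example_allocator.Delete(n);               // push it onto the internal free list
  ASSERT(example_allocator.inuse() == 0);
}
#endif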
1011
1012// -------------------------------------------------------------------------
1013// Span - a contiguous run of pages
1014// -------------------------------------------------------------------------
1015
1016// Type that can hold a page number
1017typedef uintptr_t PageID;
1018
1019// Type that can hold the length of a run of pages
1020typedef uintptr_t Length;
1021
1022static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
1023
1024// Convert byte size into pages. This won't overflow, but may return
1025// an unreasonably large value if bytes is huge enough.
1026static inline Length pages(size_t bytes) {
1027 return (bytes >> kPageShift) +
1028 ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
1029}
1030
1031// Convert a user size into the number of bytes that will actually be
1032// allocated
1033static size_t AllocationSize(size_t bytes) {
1034 if (bytes > kMaxSize) {
1035 // Large object: we allocate an integral number of pages
1036 ASSERT(bytes <= (kMaxValidPages << kPageShift));
1037 return pages(bytes) << kPageShift;
1038 } else {
1039 // Small object: find the size class to which it belongs
1040 return ByteSizeForClass(SizeClass(bytes));
1041 }
1042}
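// Worked examples for pages() and the large-object branch of AllocationSize()
// (illustrative sketch, compiled out; small sizes depend on the tables filled
// in by InitSizeClasses(), so they are not checked here):
#if 0
static void AllocationSizeExamples() {
  ASSERT(pages(1) == 1);                                  // any non-zero size needs a page
  ASSERT(pages(kPageSize) == 1);                          // exactly one page
  ASSERT(pages(kPageSize + 1) == 2);                      // rounds up
  ASSERT(AllocationSize(kMaxSize + 1) == 9 * kPageSize);  // 32769 bytes -> 9 pages
}
#endif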
1043
1044// Information kept for a span (a contiguous run of pages).
1045struct Span {
1046 PageID start; // Starting page number
1047 Length length; // Number of pages in span
1048 Span* next; // Used when in linked list
1049 Span* prev; // Used when in linked list
1050 void* objects; // Linked list of free objects
1051 unsigned int free : 1; // Is the span free
1052#ifndef NO_TCMALLOC_SAMPLES
1053 unsigned int sample : 1; // Sampled object?
1054#endif
1055 unsigned int sizeclass : 8; // Size-class for small objects (or 0)
1056 unsigned int refcount : 11; // Number of non-free objects
1057 bool decommitted : 1;
1058
1059#undef SPAN_HISTORY
1060#ifdef SPAN_HISTORY
1061 // For debugging, we can keep a log of events per span
1062 int nexthistory;
1063 char history[64];
1064 int value[64];
1065#endif
1066};
1067
1068#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)
1069
1070#ifdef SPAN_HISTORY
1071void Event(Span* span, char op, int v = 0) {
1072 span->history[span->nexthistory] = op;
1073 span->value[span->nexthistory] = v;
1074 span->nexthistory++;
1075 if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
1076}
1077#else
1078#define Event(s,o,v) ((void) 0)
1079#endif
1080
1081// Allocator/deallocator for spans
1082static PageHeapAllocator<Span> span_allocator;
1083static Span* NewSpan(PageID p, Length len) {
1084 Span* result = span_allocator.New();
1085 memset(result, 0, sizeof(*result));
1086 result->start = p;
1087 result->length = len;
1088#ifdef SPAN_HISTORY
1089 result->nexthistory = 0;
1090#endif
1091 return result;
1092}
1093
1094static inline void DeleteSpan(Span* span) {
1095#ifndef NDEBUG
1096 // In debug mode, trash the contents of deleted Spans
1097 memset(span, 0x3f, sizeof(*span));
1098#endif
1099 span_allocator.Delete(span);
1100}
1101
1102// -------------------------------------------------------------------------
1103// Doubly linked list of spans.
1104// -------------------------------------------------------------------------
1105
1106static inline void DLL_Init(Span* list) {
1107 list->next = list;
1108 list->prev = list;
1109}
1110
1111static inline void DLL_Remove(Span* span) {
1112 span->prev->next = span->next;
1113 span->next->prev = span->prev;
1114 span->prev = NULL;
1115 span->next = NULL;
1116}
1117
1118static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
1119 return list->next == list;
1120}
1121
1122static int DLL_Length(const Span* list) {
1123 int result = 0;
1124 for (Span* s = list->next; s != list; s = s->next) {
1125 result++;
1126 }
1127 return result;
1128}
1129
1130#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
1131static void DLL_Print(const char* label, const Span* list) {
1132 MESSAGE("%-10s %p:", label, list);
1133 for (const Span* s = list->next; s != list; s = s->next) {
1134 MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
1135 }
1136 MESSAGE("\n");
1137}
1138#endif
1139
1140static inline void DLL_Prepend(Span* list, Span* span) {
1141 ASSERT(span->next == NULL);
1142 ASSERT(span->prev == NULL);
1143 span->next = list->next;
1144 span->prev = list;
1145 list->next->prev = span;
1146 list->next = span;
1147}
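// Minimal usage sketch for the doubly-linked span lists above (illustrative
// only, compiled out; assumes span_allocator has already been initialized):
#if 0
static void DLL_Examples() {
  Span header;                 // a list is represented by a dummy header node
  DLL_Init(&header);
  ASSERT(DLL_IsEmpty(&header));
  Span* s = NewSpan(0, 1);     // hypothetical one-page span starting at page 0
  DLL_Prepend(&header, s);     // NewSpan() zero-fills, so s->next/prev are NULL
  ASSERT(DLL_Length(&header) == 1);
  DLL_Remove(s);
  ASSERT(DLL_IsEmpty(&header));
  DeleteSpan(s);
}
#endif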
1148
1149// -------------------------------------------------------------------------
1150// Stack traces kept for sampled allocations
1151// The following state is protected by pageheap_lock_.
1152// -------------------------------------------------------------------------
1153
1154// size/depth are made the same size as a pointer so that some generic
1155// code below can conveniently cast them back and forth to void*.
1156static const int kMaxStackDepth = 31;
1157struct StackTrace {
1158 uintptr_t size; // Size of object
1159 uintptr_t depth; // Number of PC values stored in array below
1160 void* stack[kMaxStackDepth];
1161};
1162static PageHeapAllocator<StackTrace> stacktrace_allocator;
1163static Span sampled_objects;
1164
1165// -------------------------------------------------------------------------
1166// Map from page-id to per-page data
1167// -------------------------------------------------------------------------
1168
1169// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
1170// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
1171// because sometimes the sizeclass is all the information we need.
1172
1173// Selector class -- general selector uses 3-level map
1174template <int BITS> class MapSelector {
1175 public:
1176 typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
1177 typedef PackedCache<BITS, uint64_t> CacheType;
1178};
1179
1180#if defined(WTF_CHANGES)
1181#if CPU(X86_64)
1182// On all known X86-64 platforms, the upper 16 bits are always unused and therefore
1183// can be excluded from the PageMap key.
1184// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
1185
1186static const size_t kBitsUnusedOn64Bit = 16;
1187#else
1188static const size_t kBitsUnusedOn64Bit = 0;
1189#endif
1190
1191// A three-level map for 64-bit machines
1192template <> class MapSelector<64> {
1193 public:
1194 typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
1195 typedef PackedCache<64, uint64_t> CacheType;
1196};
1197#endif
1198
1199// A two-level map for 32-bit machines
1200template <> class MapSelector<32> {
1201 public:
1202 typedef TCMalloc_PageMap2<32 - kPageShift> Type;
1203 typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
1204};
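// How the selector resolves in practice (illustrative sketch, compiled out;
// it mirrors the typedefs TCMalloc_PageHeap makes further below):
#if 0
// 32-bit build:  8 * sizeof(uintptr_t) == 32
//   MapSelector<32>::Type      is TCMalloc_PageMap2<20>   (2^20 4K pages)
//   MapSelector<32>::CacheType is PackedCache<20, uint16_t>
// 64-bit x86-64: 8 * sizeof(uintptr_t) == 64
//   MapSelector<64>::Type      is TCMalloc_PageMap3<36>   (64 - 12 - 16 unused bits)
typedef MapSelector<8 * sizeof(uintptr_t)>::Type ExamplePageMap;
typedef MapSelector<8 * sizeof(uintptr_t)>::CacheType ExamplePageMapCache;
#endif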
1205
1206// -------------------------------------------------------------------------
1207// Page-level allocator
1208// * Eager coalescing
1209//
1210// Heap for page-level allocation. We allow allocating and freeing a
1211// contiguous run of pages (called a "span").
1212// -------------------------------------------------------------------------
1213
1214#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1215// The central page heap collects spans of memory that have been deleted but are still committed until they are released
1216// back to the system. We use a background thread to periodically scan the list of free spans and release some back to the
1217// system. Every 5 seconds, the background thread wakes up and does the following:
1218// - Check if we needed to commit memory in the last 5 seconds. If so, skip this scavenge because it's a sign that we are short
1219// of free committed pages and so we should not release them back to the system yet.
1220// - Otherwise, go through the list of free spans (from largest to smallest) and release up to a fraction of the free committed pages
1221// back to the system.
1222// - If the number of free committed pages reaches kMinimumFreeCommittedPageCount, we can stop the scavenging and block the
1223// scavenging thread until the number of free committed pages goes above kMinimumFreeCommittedPageCount.
1224
1225// Background thread wakes up every 5 seconds to scavenge as long as there is memory available to return to the system.
1226static const int kScavengeTimerDelayInSeconds = 5;
1227
1228// Number of free committed pages that we want to keep around.
1229static const size_t kMinimumFreeCommittedPageCount = 512;
1230
1231// During a scavenge, we'll release up to a fraction of the free committed pages.
1232#if OS(WINDOWS)
1233// We are slightly less aggressive in releasing memory on Windows for performance reasons.
1234static const int kMaxScavengeAmountFactor = 3;
1235#else
1236static const int kMaxScavengeAmountFactor = 2;
1237#endif
1238#endif
1239
1240class TCMalloc_PageHeap {
1241 public:
1242 void init();
1243
1244 // Allocate a run of "n" pages. Returns zero if out of memory.
1245 Span* New(Length n);
1246
1247 // Delete the span "[p, p+n-1]".
1248 // REQUIRES: span was returned by earlier call to New() and
1249 // has not yet been deleted.
1250 void Delete(Span* span);
1251
1252 // Mark an allocated span as being used for small objects of the
1253 // specified size-class.
1254 // REQUIRES: span was returned by an earlier call to New()
1255 // and has not yet been deleted.
1256 void RegisterSizeClass(Span* span, size_t sc);
1257
1258 // Split an allocated span into two spans: one of length "n" pages
1259 // followed by another span of length "span->length - n" pages.
1260 // Modifies "*span" to point to the first span of length "n" pages.
1261 // Returns a pointer to the second span.
1262 //
1263 // REQUIRES: "0 < n < span->length"
1264 // REQUIRES: !span->free
1265 // REQUIRES: span->sizeclass == 0
1266 Span* Split(Span* span, Length n);
1267
1268 // Return the descriptor for the specified page.
1269 inline Span* GetDescriptor(PageID p) const {
1270 return reinterpret_cast<Span*>(pagemap_.get(p));
1271 }
1272
1273#ifdef WTF_CHANGES
1274 inline Span* GetDescriptorEnsureSafe(PageID p)
1275 {
1276 pagemap_.Ensure(p, 1);
1277 return GetDescriptor(p);
1278 }
1279
1280 size_t ReturnedBytes() const;
1281#endif
1282
1283 // Dump state to stderr
1284#ifndef WTF_CHANGES
1285 void Dump(TCMalloc_Printer* out);
1286#endif
1287
1288 // Return number of bytes allocated from system
1289 inline uint64_t SystemBytes() const { return system_bytes_; }
1290
1291 // Return number of free bytes in heap
1292 uint64_t FreeBytes() const {
1293 return (static_cast<uint64_t>(free_pages_) << kPageShift);
1294 }
1295
1296 bool Check();
1297 bool CheckList(Span* list, Length min_pages, Length max_pages);
1298
1299 // Release all pages on the free list for reuse by the OS:
1300 void ReleaseFreePages();
1301
1302 // Return 0 if we have no information, or else the correct sizeclass for p.
1303 // Reads and writes to pagemap_cache_ do not require locking.
1304 // The entries are 64 bits on 64-bit hardware and 16 bits on
1305 // 32-bit hardware, and we don't mind raciness as long as each read of
1306 // an entry yields a valid entry, not a partially updated entry.
1307 size_t GetSizeClassIfCached(PageID p) const {
1308 return pagemap_cache_.GetOrDefault(p, 0);
1309 }
1310 void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
1311
1312 private:
1313 // Pick the appropriate map and cache types based on pointer size
1314 typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
1315 typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
1316 PageMap pagemap_;
1317 mutable PageMapCache pagemap_cache_;
1318
1319 // We segregate spans of a given size into two circular linked
1320 // lists: one for normal spans, and one for spans whose memory
1321 // has been returned to the system.
1322 struct SpanList {
1323 Span normal;
1324 Span returned;
1325 };
1326
1327 // List of free spans of length >= kMaxPages
1328 SpanList large_;
1329
1330 // Array mapping from span length to a doubly linked list of free spans
1331 SpanList free_[kMaxPages];
1332
1333 // Number of pages kept in free lists
1334 uintptr_t free_pages_;
1335
1336 // Bytes allocated from system
1337 uint64_t system_bytes_;
1338
1339#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1340 // Number of pages kept in free lists that are still committed.
1341 Length free_committed_pages_;
1342
1343 // Number of pages that we committed in the last scavenge wait interval.
1344 Length pages_committed_since_last_scavenge_;
1345#endif
1346
1347 bool GrowHeap(Length n);
1348
1349 // REQUIRES span->length >= n
1350 // Remove span from its free list, and move any leftover part of
1351 // span into appropriate free lists. Also update "span" to have
1352 // length exactly "n" and mark it as non-free so it can be returned
1353 // to the client.
1354 //
1355 // "released" is true iff "span" was found on a "returned" list.
1356 void Carve(Span* span, Length n, bool released);
1357
1358 void RecordSpan(Span* span) {
1359 pagemap_.set(span->start, span);
1360 if (span->length > 1) {
1361 pagemap_.set(span->start + span->length - 1, span);
1362 }
1363 }
1364
1365 // Allocate a large span of length == n. If successful, returns a
1366 // span of exactly the specified length. Else, returns NULL.
1367 Span* AllocLarge(Length n);
1368
1369#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1370 // Incrementally release some memory to the system.
1371 // IncrementalScavenge(n) is called whenever n pages are freed.
1372 void IncrementalScavenge(Length n);
1373#endif
1374
1375 // Number of pages to deallocate before doing more scavenging
1376 int64_t scavenge_counter_;
1377
1378 // Index of last free list we scavenged
1379 size_t scavenge_index_;
1380
1381#if defined(WTF_CHANGES) && OS(DARWIN)
1382 friend class FastMallocZone;
1383#endif
1384
1385#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1386 void initializeScavenger();
1387 ALWAYS_INLINE void signalScavenger();
1388 void scavenge();
1389 ALWAYS_INLINE bool shouldContinueScavenging() const;
1390
1391#if !HAVE(DISPATCH_H)
1392 static NO_RETURN void* runScavengerThread(void*);
1393 NO_RETURN void scavengerThread();
1394
1395 // Keeps track of whether the background thread is actively scavenging memory every kScavengeTimerDelayInSeconds, or
1396 // it's blocked waiting for more pages to be deleted.
1397 bool m_scavengeThreadActive;
1398
1399 pthread_mutex_t m_scavengeMutex;
1400 pthread_cond_t m_scavengeCondition;
1401#else // !HAVE(DISPATCH_H)
1402 void periodicScavenge();
1403
1404 dispatch_queue_t m_scavengeQueue;
1405 dispatch_source_t m_scavengeTimer;
1406 bool m_scavengingScheduled;
1407#endif
1408
1409#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1410};
1411
1412void TCMalloc_PageHeap::init()
1413{
1414 pagemap_.init(MetaDataAlloc);
1415 pagemap_cache_ = PageMapCache(0);
1416 free_pages_ = 0;
1417 system_bytes_ = 0;
1418
1419#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1420 free_committed_pages_ = 0;
1421 pages_committed_since_last_scavenge_ = 0;
1422#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1423
1424 scavenge_counter_ = 0;
1425 // Start scavenging at kMaxPages list
1426 scavenge_index_ = kMaxPages-1;
1427 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
1428 DLL_Init(&large_.normal);
1429 DLL_Init(&large_.returned);
1430 for (size_t i = 0; i < kMaxPages; i++) {
1431 DLL_Init(&free_[i].normal);
1432 DLL_Init(&free_[i].returned);
1433 }
1434
1435#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1436 initializeScavenger();
1437#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1438}
1439
1440#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1441
1442#if !HAVE(DISPATCH_H)
1443
1444void TCMalloc_PageHeap::initializeScavenger()
1445{
1446 pthread_mutex_init(&m_scavengeMutex, 0);
1447 pthread_cond_init(&m_scavengeCondition, 0);
1448 m_scavengeThreadActive = true;
1449 pthread_t thread;
1450 pthread_create(&thread, 0, runScavengerThread, this);
1451}
1452
1453void* TCMalloc_PageHeap::runScavengerThread(void* context)
1454{
1455 static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
1456#if COMPILER(MSVC)
1457 // Without this, Visual Studio will complain that this method does not return a value.
1458 return 0;
1459#endif
1460}
1461
1462ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
1463{
1464 if (!m_scavengeThreadActive && shouldContinueScavenging())
1465 pthread_cond_signal(&m_scavengeCondition);
1466}
1467
1468#else // !HAVE(DISPATCH_H)
1469
1470void TCMalloc_PageHeap::initializeScavenger()
1471{
1472 m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocSavenger", NULL);
1473 m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
1474 dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, kScavengeTimerDelayInSeconds * NSEC_PER_SEC);
1475 dispatch_source_set_timer(m_scavengeTimer, startTime, kScavengeTimerDelayInSeconds * NSEC_PER_SEC, 1000 * NSEC_PER_USEC);
1476 dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
1477 m_scavengingScheduled = false;
1478}
1479
1480ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
1481{
1482 if (!m_scavengingScheduled && shouldContinueScavenging()) {
1483 m_scavengingScheduled = true;
1484 dispatch_resume(m_scavengeTimer);
1485 }
1486}
1487
1488#endif
1489
1490void TCMalloc_PageHeap::scavenge()
1491{
1492 // If we had to commit memory in the last 5 seconds, it means we don't have enough free committed pages
1493 // for the amount of allocations that we do. So hold off on releasing memory back to the system.
1494 if (pages_committed_since_last_scavenge_ > 0) {
1495 pages_committed_since_last_scavenge_ = 0;
1496 return;
1497 }
1498 Length pagesDecommitted = 0;
1499 for (int i = kMaxPages; i >= 0; i--) {
1500 SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
1501 if (!DLL_IsEmpty(&slist->normal)) {
1502 // Release the last span on the normal portion of this list
1503 Span* s = slist->normal.prev;
1504 // Only decommit up to a fraction of the free committed pages in one scavenge pass.
1505 if ((pagesDecommitted + s->length) * kMaxScavengeAmountFactor > free_committed_pages_)
1506 continue;
1507 DLL_Remove(s);
1508 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
1509 static_cast<size_t>(s->length << kPageShift));
1510 if (!s->decommitted) {
1511 pagesDecommitted += s->length;
1512 s->decommitted = true;
1513 }
1514 DLL_Prepend(&slist->returned, s);
1515 // We can stop scavenging if the number of free committed pages left is less than or equal to the minimum number we want to keep around.
1516 if (free_committed_pages_ <= kMinimumFreeCommittedPageCount + pagesDecommitted)
1517 break;
1518 }
1519 }
1520 pages_committed_since_last_scavenge_ = 0;
1521 ASSERT(free_committed_pages_ >= pagesDecommitted);
1522 free_committed_pages_ -= pagesDecommitted;
1523}
1524
1525ALWAYS_INLINE bool TCMalloc_PageHeap::shouldContinueScavenging() const
1526{
1527 return free_committed_pages_ > kMinimumFreeCommittedPageCount;
1528}
1529
1530#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1531
1532inline Span* TCMalloc_PageHeap::New(Length n) {
1533 ASSERT(Check());
1534 ASSERT(n > 0);
1535
1536 // Find first size >= n that has a non-empty list
1537 for (Length s = n; s < kMaxPages; s++) {
1538 Span* ll = NULL;
1539 bool released = false;
1540 if (!DLL_IsEmpty(&free_[s].normal)) {
1541 // Found normal span
1542 ll = &free_[s].normal;
1543 } else if (!DLL_IsEmpty(&free_[s].returned)) {
1544 // Found returned span; reallocate it
1545 ll = &free_[s].returned;
1546 released = true;
1547 } else {
1548 // Keep looking in larger classes
1549 continue;
1550 }
1551
1552 Span* result = ll->next;
1553 Carve(result, n, released);
1554 if (result->decommitted) {
1555 TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift));
1556 result->decommitted = false;
1557#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1558 pages_committed_since_last_scavenge_ += n;
1559#endif
1560 }
1561#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1562 else {
1563 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
1564 // free committed pages count.
1565 ASSERT(free_committed_pages_ >= n);
1566 free_committed_pages_ -= n;
1567 }
1568#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1569 ASSERT(Check());
1570 free_pages_ -= n;
1571 return result;
1572 }
1573
1574 Span* result = AllocLarge(n);
1575 if (result != NULL) {
1576 ASSERT_SPAN_COMMITTED(result);
1577 return result;
1578 }
1579
1580 // Grow the heap and try again
1581 if (!GrowHeap(n)) {
1582 ASSERT(Check());
1583 return NULL;
1584 }
1585
1586 return AllocLarge(n);
1587}
1588
1589Span* TCMalloc_PageHeap::AllocLarge(Length n) {
1590 // find the best span (closest to n in size).
1591 // The following loops implement address-ordered best-fit.
1592 bool from_released = false;
1593 Span *best = NULL;
1594
1595 // Search through normal list
1596 for (Span* span = large_.normal.next;
1597 span != &large_.normal;
1598 span = span->next) {
1599 if (span->length >= n) {
1600 if ((best == NULL)
1601 || (span->length < best->length)
1602 || ((span->length == best->length) && (span->start < best->start))) {
1603 best = span;
1604 from_released = false;
1605 }
1606 }
1607 }
1608
1609 // Search through released list in case it has a better fit
1610 for (Span* span = large_.returned.next;
1611 span != &large_.returned;
1612 span = span->next) {
1613 if (span->length >= n) {
1614 if ((best == NULL)
1615 || (span->length < best->length)
1616 || ((span->length == best->length) && (span->start < best->start))) {
1617 best = span;
1618 from_released = true;
1619 }
1620 }
1621 }
1622
1623 if (best != NULL) {
1624 Carve(best, n, from_released);
1625 if (best->decommitted) {
1626 TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift));
1627 best->decommitted = false;
1628#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1629 pages_committed_since_last_scavenge_ += n;
1630#endif
1631 }
1632#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1633 else {
1634 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
1635 // free committed pages count.
1636 ASSERT(free_committed_pages_ >= n);
1637 free_committed_pages_ -= n;
1638 }
1639#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1640 ASSERT(Check());
1641 free_pages_ -= n;
1642 return best;
1643 }
1644 return NULL;
1645}
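// Example of the address-ordered best fit above: given free large spans of
// 300, 260 and 260 pages and a request of n = 256, the two 260-page spans win
// on length and the one with the lower start address wins the tie, which
// tends to keep long-lived allocations packed toward lower addresses.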
1646
1647Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
1648 ASSERT(0 < n);
1649 ASSERT(n < span->length);
1650 ASSERT(!span->free);
1651 ASSERT(span->sizeclass == 0);
1652 Event(span, 'T', n);
1653
1654 const Length extra = span->length - n;
1655 Span* leftover = NewSpan(span->start + n, extra);
1656 Event(leftover, 'U', extra);
1657 RecordSpan(leftover);
1658 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
1659 span->length = n;
1660
1661 return leftover;
1662}
1663
1664static ALWAYS_INLINE void propagateDecommittedState(Span* destination, Span* source)
1665{
1666 destination->decommitted = source->decommitted;
1667}
1668
1669inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
1670 ASSERT(n > 0);
1671 DLL_Remove(span);
1672 span->free = 0;
1673 Event(span, 'A', n);
1674
1675 const int extra = static_cast<int>(span->length - n);
1676 ASSERT(extra >= 0);
1677 if (extra > 0) {
1678 Span* leftover = NewSpan(span->start + n, extra);
1679 leftover->free = 1;
1680 propagateDecommittedState(leftover, span);
1681 Event(leftover, 'S', extra);
1682 RecordSpan(leftover);
1683
1684 // Place leftover span on appropriate free list
1685 SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
1686 Span* dst = released ? &listpair->returned : &listpair->normal;
1687 DLL_Prepend(dst, leftover);
1688
1689 span->length = n;
1690 pagemap_.set(span->start + n - 1, span);
1691 }
1692}
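// Note on Carve(): the leftover piece inherits the decommitted state of the
// span it was split from (propagateDecommittedState above) and is prepended
// to the "returned" list whenever the original span came from a released
// list, so splitting never mislabels decommitted pages as committed.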
1693
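// The helper below keeps a coalesced span's decommitted flag consistent: when
// a committed span is merged with a decommitted one, the committed side is
// released as well, so the resulting span is uniformly decommitted rather
// than half committed and half not.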
1694static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
1695{
1696 if (destination->decommitted && !other->decommitted) {
1697 TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
1698 static_cast<size_t>(other->length << kPageShift));
1699 } else if (other->decommitted && !destination->decommitted) {
1700 TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift),
1701 static_cast<size_t>(destination->length << kPageShift));
1702 destination->decommitted = true;
1703 }
1704}
1705
1706inline void TCMalloc_PageHeap::Delete(Span* span) {
1707 ASSERT(Check());
1708 ASSERT(!span->free);
1709 ASSERT(span->length > 0);
1710 ASSERT(GetDescriptor(span->start) == span);
1711 ASSERT(GetDescriptor(span->start + span->length - 1) == span);
1712 span->sizeclass = 0;
1713#ifndef NO_TCMALLOC_SAMPLES
1714 span->sample = 0;
1715#endif
1716
1717 // Coalesce -- we guarantee that "p" != 0, so no bounds checking
1718 // necessary. We do not bother resetting the stale pagemap
1719 // entries for the pieces we are merging together because we only
1720 // care about the pagemap entries for the boundaries.
1721#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1722 // Track the total size of the neighboring free spans that are committed.
1723 Length neighboringCommittedSpansLength = 0;
1724#endif
1725 const PageID p = span->start;
1726 const Length n = span->length;
1727 Span* prev = GetDescriptor(p-1);
1728 if (prev != NULL && prev->free) {
1729 // Merge preceding span into this span
1730 ASSERT(prev->start + prev->length == p);
1731 const Length len = prev->length;
1732#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1733 if (!prev->decommitted)
1734 neighboringCommittedSpansLength += len;
1735#endif
1736 mergeDecommittedStates(span, prev);
1737 DLL_Remove(prev);
1738 DeleteSpan(prev);
1739 span->start -= len;
1740 span->length += len;
1741 pagemap_.set(span->start, span);
1742 Event(span, 'L', len);
1743 }
1744 Span* next = GetDescriptor(p+n);
1745 if (next != NULL && next->free) {
1746 // Merge next span into this span
1747 ASSERT(next->start == p+n);
1748 const Length len = next->length;
1749#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1750 if (!next->decommitted)
1751 neighboringCommittedSpansLength += len;
1752#endif
1753 mergeDecommittedStates(span, next);
1754 DLL_Remove(next);
1755 DeleteSpan(next);
1756 span->length += len;
1757 pagemap_.set(span->start + span->length - 1, span);
1758 Event(span, 'R', len);
1759 }
1760
1761 Event(span, 'D', span->length);
1762 span->free = 1;
1763 if (span->decommitted) {
1764 if (span->length < kMaxPages)
1765 DLL_Prepend(&free_[span->length].returned, span);
1766 else
1767 DLL_Prepend(&large_.returned, span);
1768 } else {
1769 if (span->length < kMaxPages)
1770 DLL_Prepend(&free_[span->length].normal, span);
1771 else
1772 DLL_Prepend(&large_.normal, span);
1773 }
1774 free_pages_ += n;
1775
1776#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1777 if (span->decommitted) {
1778 // If the merged span is decommitted, that means we decommitted any neighboring spans that were
1779 // committed. Update the free committed pages count.
1780 free_committed_pages_ -= neighboringCommittedSpansLength;
1781 } else {
1782 // If the merged span remains committed, add the deleted span's size to the free committed pages count.
1783 free_committed_pages_ += n;
1784 }
1785
1786 // Make sure the scavenge thread becomes active if we have enough freed pages to release some back to the system.
1787 signalScavenger();
1788#else
1789 IncrementalScavenge(n);
1790#endif
1791
1792 ASSERT(Check());
1793}
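// Accounting note for Delete(): neighboringCommittedSpansLength counts only
// pages that were committed before coalescing. If the merged span ends up
// decommitted, those neighbors were just released, so their pages are
// subtracted from free_committed_pages_; otherwise the n newly freed pages
// are added to it.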
1794
1795#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1796void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
1797 // Fast path; not yet time to release memory
1798 scavenge_counter_ -= n;
1799 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
1800
1801 static const size_t kDefaultReleaseDelay = 64;
1802
1803 // Find index of free list to scavenge
1804 size_t index = scavenge_index_ + 1;
1805 for (size_t i = 0; i < kMaxPages+1; i++) {
1806 if (index > kMaxPages) index = 0;
1807 SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
1808 if (!DLL_IsEmpty(&slist->normal)) {
1809 // Release the last span on the normal portion of this list
1810 Span* s = slist->normal.prev;
1811 DLL_Remove(s);
1812 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
1813 static_cast<size_t>(s->length << kPageShift));
1814 s->decommitted = true;
1815 DLL_Prepend(&slist->returned, s);
1816
1817 scavenge_counter_ = std::max<size_t>(16UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));
1818
1819 if (index == kMaxPages && !DLL_IsEmpty(&slist->normal))
1820 scavenge_index_ = index - 1;
1821 else
1822 scavenge_index_ = index;
1823 return;
1824 }
1825 index++;
1826 }
1827
1828 // Nothing to scavenge, delay for a while
1829 scavenge_counter_ = kDefaultReleaseDelay;
1830}
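// Rough behavior of the release-delay heuristic above: with
// kDefaultReleaseDelay == 64 the counter is clamped to at least 16, so one
// span is returned to the system roughly every 16 to 64 pages deallocated,
// somewhat more often as free_pages_ grows.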
1831#endif
1832
1833void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
1834 // Associate span object with all interior pages as well
1835 ASSERT(!span->free);
1836 ASSERT(GetDescriptor(span->start) == span);
1837 ASSERT(GetDescriptor(span->start+span->length-1) == span);
1838 Event(span, 'C', sc);
1839 span->sizeclass = static_cast<unsigned int>(sc);
1840 for (Length i = 1; i < span->length-1; i++) {
1841 pagemap_.set(span->start+i, span);
1842 }
1843}
1844
1845#ifdef WTF_CHANGES
1846size_t TCMalloc_PageHeap::ReturnedBytes() const {
1847 size_t result = 0;
1848 for (unsigned s = 0; s < kMaxPages; s++) {
1849 const int r_length = DLL_Length(&free_[s].returned);
1850 unsigned r_pages = s * r_length;
1851 result += r_pages << kPageShift;
1852 }
1853
1854 for (Span* s = large_.returned.next; s != &large_.returned; s = s->next)
1855 result += s->length << kPageShift;
1856 return result;
1857}
1858#endif
1859
1860#ifndef WTF_CHANGES
1861static double PagesToMB(uint64_t pages) {
1862 return (pages << kPageShift) / 1048576.0;
1863}
1864
1865void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
1866 int nonempty_sizes = 0;
1867 for (int s = 0; s < kMaxPages; s++) {
1868 if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
1869 nonempty_sizes++;
1870 }
1871 }
1872 out->printf("------------------------------------------------\n");
1873 out->printf("PageHeap: %d sizes; %6.1f MB free\n",
1874 nonempty_sizes, PagesToMB(free_pages_));
1875 out->printf("------------------------------------------------\n");
1876 uint64_t total_normal = 0;
1877 uint64_t total_returned = 0;
1878 for (int s = 0; s < kMaxPages; s++) {
1879 const int n_length = DLL_Length(&free_[s].normal);
1880 const int r_length = DLL_Length(&free_[s].returned);
1881 if (n_length + r_length > 0) {
1882 uint64_t n_pages = s * n_length;
1883 uint64_t r_pages = s * r_length;
1884 total_normal += n_pages;
1885 total_returned += r_pages;
1886 out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
1887 "; unmapped: %6.1f MB; %6.1f MB cum\n",
1888 s,
1889 (n_length + r_length),
1890 PagesToMB(n_pages + r_pages),
1891 PagesToMB(total_normal + total_returned),
1892 PagesToMB(r_pages),
1893 PagesToMB(total_returned));
1894 }
1895 }
1896
1897 uint64_t n_pages = 0;
1898 uint64_t r_pages = 0;
1899 int n_spans = 0;
1900 int r_spans = 0;
1901 out->printf("Normal large spans:\n");
1902 for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
1903 out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
1904 s->length, PagesToMB(s->length));
1905 n_pages += s->length;
1906 n_spans++;
1907 }
1908 out->printf("Unmapped large spans:\n");
1909 for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
1910 out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
1911 s->length, PagesToMB(s->length));
1912 r_pages += s->length;
1913 r_spans++;
1914 }
1915 total_normal += n_pages;
1916 total_returned += r_pages;
1917 out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
1918 "; unmapped: %6.1f MB; %6.1f MB cum\n",
1919 (n_spans + r_spans),
1920 PagesToMB(n_pages + r_pages),
1921 PagesToMB(total_normal + total_returned),
1922 PagesToMB(r_pages),
1923 PagesToMB(total_returned));
1924}
1925#endif
1926
1927bool TCMalloc_PageHeap::GrowHeap(Length n) {
1928 ASSERT(kMaxPages >= kMinSystemAlloc);
1929 if (n > kMaxValidPages) return false;
1930 Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
1931 size_t actual_size;
1932 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
1933 if (ptr == NULL) {
1934 if (n < ask) {
1935 // Try growing just "n" pages
1936 ask = n;
1937 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
1938 }
1939 if (ptr == NULL) return false;
1940 }
1941 ask = actual_size >> kPageShift;
1942
1943#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1944 pages_committed_since_last_scavenge_ += ask;
1945#endif
1946
1947 uint64_t old_system_bytes = system_bytes_;
1948 system_bytes_ += (ask << kPageShift);
1949 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
1950 ASSERT(p > 0);
1951
1952 // If we already have a lot of pages allocated, just preallocate a bunch of
1953 // memory for the page map. This prevents fragmentation by pagemap metadata
1954 // when a program keeps allocating and freeing large blocks.
1955
1956 if (old_system_bytes < kPageMapBigAllocationThreshold
1957 && system_bytes_ >= kPageMapBigAllocationThreshold) {
1958 pagemap_.PreallocateMoreMemory();
1959 }
1960
1961 // Make sure pagemap_ has entries for all of the new pages.
1962 // Plus ensure one before and one after so coalescing code
1963 // does not need bounds-checking.
1964 if (pagemap_.Ensure(p-1, ask+2)) {
1965 // Pretend the new area is allocated and then Delete() it to
1966 // cause any necessary coalescing to occur.
1967 //
1968 // We do not adjust free_pages_ here since Delete() will do it for us.
1969 Span* span = NewSpan(p, ask);
1970 RecordSpan(span);
1971 Delete(span);
1972 ASSERT(Check());
1973 return true;
1974 } else {
1975 // We could not allocate memory within "pagemap_"
1976 // TODO: Once we can return memory to the system, return the new span
1977 return false;
1978 }
1979}
1980
1981bool TCMalloc_PageHeap::Check() {
1982 ASSERT(free_[0].normal.next == &free_[0].normal);
1983 ASSERT(free_[0].returned.next == &free_[0].returned);
1984 CheckList(&large_.normal, kMaxPages, 1000000000);
1985 CheckList(&large_.returned, kMaxPages, 1000000000);
1986 for (Length s = 1; s < kMaxPages; s++) {
1987 CheckList(&free_[s].normal, s, s);
1988 CheckList(&free_[s].returned, s, s);
1989 }
1990 return true;
1991}
1992
1993#if ASSERT_DISABLED
1994bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) {
1995 return true;
1996}
1997#else
1998bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
1999 for (Span* s = list->next; s != list; s = s->next) {
2000 CHECK_CONDITION(s->free);
2001 CHECK_CONDITION(s->length >= min_pages);
2002 CHECK_CONDITION(s->length <= max_pages);
2003 CHECK_CONDITION(GetDescriptor(s->start) == s);
2004 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
2005 }
2006 return true;
2007}
2008#endif
2009
2010static void ReleaseFreeList(Span* list, Span* returned) {
2011 // Walk backwards through list so that when we push these
2012 // spans on the "returned" list, we preserve the order.
2013 while (!DLL_IsEmpty(list)) {
2014 Span* s = list->prev;
2015 DLL_Remove(s);
2016 DLL_Prepend(returned, s);
2017 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
2018 static_cast<size_t>(s->length << kPageShift));
2019 }
2020}
2021
2022void TCMalloc_PageHeap::ReleaseFreePages() {
2023 for (Length s = 0; s < kMaxPages; s++) {
2024 ReleaseFreeList(&free_[s].normal, &free_[s].returned);
2025 }
2026 ReleaseFreeList(&large_.normal, &large_.returned);
2027 ASSERT(Check());
2028}
2029
2030//-------------------------------------------------------------------
2031// Free list
2032//-------------------------------------------------------------------
2033
2034class TCMalloc_ThreadCache_FreeList {
2035 private:
2036 void* list_; // Linked list of nodes
2037 uint16_t length_; // Current length
2038 uint16_t lowater_; // Low water mark for list length
2039
2040 public:
2041 void Init() {
2042 list_ = NULL;
2043 length_ = 0;
2044 lowater_ = 0;
2045 }
2046
2047 // Return current length of list
2048 int length() const {
2049 return length_;
2050 }
2051
2052 // Is list empty?
2053 bool empty() const {
2054 return list_ == NULL;
2055 }
2056
2057 // Low-water mark management
2058 int lowwatermark() const { return lowater_; }
2059 void clear_lowwatermark() { lowater_ = length_; }
2060
2061 ALWAYS_INLINE void Push(void* ptr) {
2062 SLL_Push(&list_, ptr);
2063 length_++;
2064 }
2065
2066 void PushRange(int N, void *start, void *end) {
2067 SLL_PushRange(&list_, start, end);
2068 length_ = length_ + static_cast<uint16_t>(N);
2069 }
2070
2071 void PopRange(int N, void **start, void **end) {
2072 SLL_PopRange(&list_, N, start, end);
2073 ASSERT(length_ >= N);
2074 length_ = length_ - static_cast<uint16_t>(N);
2075 if (length_ < lowater_) lowater_ = length_;
2076 }
2077
2078 ALWAYS_INLINE void* Pop() {
2079 ASSERT(list_ != NULL);
2080 length_--;
2081 if (length_ < lowater_) lowater_ = length_;
2082 return SLL_Pop(&list_);
2083 }
2084
2085#ifdef WTF_CHANGES
2086 template <class Finder, class Reader>
2087 void enumerateFreeObjects(Finder& finder, const Reader& reader)
2088 {
2089 for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
2090 finder.visit(nextObject);
2091 }
2092#endif
2093};
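// Usage sketch for the per-thread free list above (illustrative only):
//
//   TCMalloc_ThreadCache_FreeList fl;
//   fl.Init();
//   fl.Push(object);      // length() == 1
//   void* p = fl.Pop();   // length() == 0; lowwatermark() records the
//                         // smallest length seen since clear_lowwatermark(),
//                         // which Scavenge() uses to decide how much to give
//                         // back to the central cache.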
2094
2095//-------------------------------------------------------------------
2096// Data kept per thread
2097//-------------------------------------------------------------------
2098
2099class TCMalloc_ThreadCache {
2100 private:
2101 typedef TCMalloc_ThreadCache_FreeList FreeList;
2102#if COMPILER(MSVC)
2103 typedef DWORD ThreadIdentifier;
2104#else
2105 typedef pthread_t ThreadIdentifier;
2106#endif
2107
2108 size_t size_; // Combined size of data
2109 ThreadIdentifier tid_; // Which thread owns it
2110 bool in_setspecific_; // Called pthread_setspecific?
2111 FreeList list_[kNumClasses]; // Array indexed by size-class
2112
2113 // We sample allocations, biased by the size of the allocation
2114 uint32_t rnd_; // Cheap random number generator
2115 size_t bytes_until_sample_; // Bytes until we sample next
2116
2117 // Allocate a new heap. REQUIRES: pageheap_lock is held.
2118 static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);
2119
2120 // Use only as pthread thread-specific destructor function.
2121 static void DestroyThreadCache(void* ptr);
2122 public:
2123 // All ThreadCache objects are kept in a linked list (for stats collection)
2124 TCMalloc_ThreadCache* next_;
2125 TCMalloc_ThreadCache* prev_;
2126
2127 void Init(ThreadIdentifier tid);
2128 void Cleanup();
2129
2130 // Accessors (mostly just for printing stats)
2131 int freelist_length(size_t cl) const { return list_[cl].length(); }
2132
2133 // Total byte size in cache
2134 size_t Size() const { return size_; }
2135
2136 void* Allocate(size_t size);
2137 void Deallocate(void* ptr, size_t size_class);
2138
2139 void FetchFromCentralCache(size_t cl, size_t allocationSize);
2140 void ReleaseToCentralCache(size_t cl, int N);
2141 void Scavenge();
2142 void Print() const;
2143
2144 // Record allocation of "k" bytes. Return true iff allocation
2145 // should be sampled
2146 bool SampleAllocation(size_t k);
2147
2148 // Pick next sampling point
2149 void PickNextSample(size_t k);
2150
2151 static void InitModule();
2152 static void InitTSD();
2153 static TCMalloc_ThreadCache* GetThreadHeap();
2154 static TCMalloc_ThreadCache* GetCache();
2155 static TCMalloc_ThreadCache* GetCacheIfPresent();
2156 static TCMalloc_ThreadCache* CreateCacheIfNecessary();
2157 static void DeleteCache(TCMalloc_ThreadCache* heap);
2158 static void BecomeIdle();
2159 static void RecomputeThreadCacheSize();
2160
2161#ifdef WTF_CHANGES
2162 template <class Finder, class Reader>
2163 void enumerateFreeObjects(Finder& finder, const Reader& reader)
2164 {
2165 for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
2166 list_[sizeClass].enumerateFreeObjects(finder, reader);
2167 }
2168#endif
2169};
2170
2171//-------------------------------------------------------------------
2172// Data kept per size-class in central cache
2173//-------------------------------------------------------------------
2174
2175class TCMalloc_Central_FreeList {
2176 public:
2177 void Init(size_t cl);
2178
2179 // These methods all do internal locking.
2180
2181 // Insert the specified range into the central freelist. N is the number of
2182 // elements in the range.
2183 void InsertRange(void *start, void *end, int N);
2184
2185 // Returns the actual number of fetched elements in *N.
2186 void RemoveRange(void **start, void **end, int *N);
2187
2188 // Returns the number of free objects in cache.
2189 size_t length() {
2190 SpinLockHolder h(&lock_);
2191 return counter_;
2192 }
2193
2194 // Returns the number of free objects in the transfer cache.
2195 int tc_length() {
2196 SpinLockHolder h(&lock_);
2197 return used_slots_ * num_objects_to_move[size_class_];
2198 }
2199
2200#ifdef WTF_CHANGES
2201 template <class Finder, class Reader>
2202 void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
2203 {
2204 for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0))
2205 ASSERT(!span->objects);
2206
2207 ASSERT(!nonempty_.objects);
2208 static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);
2209
2210 Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
2211 Span* remoteSpan = nonempty_.next;
2212
2213 for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) {
2214 for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
2215 finder.visit(nextObject);
2216 }
2217 }
2218#endif
2219
2220 private:
2221 // REQUIRES: lock_ is held
2222 // Remove object from cache and return.
2223 // Return NULL if no free entries in cache.
2224 void* FetchFromSpans();
2225
2226 // REQUIRES: lock_ is held
2227 // Remove object from cache and return. Fetches
2228 // from pageheap if cache is empty. Only returns
2229 // NULL on allocation failure.
2230 void* FetchFromSpansSafe();
2231
2232 // REQUIRES: lock_ is held
2233 // Release a linked list of objects to spans.
2234 // May temporarily release lock_.
2235 void ReleaseListToSpans(void *start);
2236
2237 // REQUIRES: lock_ is held
2238 // Release an object to spans.
2239 // May temporarily release lock_.
2240 void ReleaseToSpans(void* object);
2241
2242 // REQUIRES: lock_ is held
2243 // Populate cache by fetching from the page heap.
2244 // May temporarily release lock_.
2245 void Populate();
2246
2247 // REQUIRES: lock is held.
2248 // Tries to make room for a TCEntry. If the cache is full it will try to
2249 // expand it at the cost of some other cache size. Return false if there is
2250 // no space.
2251 bool MakeCacheSpace();
2252
2253 // REQUIRES: lock_ for locked_size_class is held.
2254 // Picks a "random" size class to steal TCEntry slot from. In reality it
2255 // just iterates over the sizeclasses but does so without taking a lock.
2256 // Returns true on success.
2257 // May temporarily lock a "random" size class.
2258 static bool EvictRandomSizeClass(size_t locked_size_class, bool force);
2259
2260 // REQUIRES: lock_ is *not* held.
2261 // Tries to shrink the Cache. If force is true it will release objects to
2262 // spans if that allows it to shrink the cache. Returns false if it failed
2263 // to shrink the cache. Decrements cache_size_ on success.
2264 // May temporarily take lock_. If it takes lock_, the locked_size_class
2265 // lock is released to keep the thread from holding two size class locks
2266 // concurrently, which could lead to a deadlock.
2267 bool ShrinkCache(int locked_size_class, bool force);
2268
2269 // This lock protects all the data members. used_slots_ and cache_size_
2270 // may be looked at without holding the lock.
2271 SpinLock lock_;
2272
2273 // We keep linked lists of empty and non-empty spans.
2274 size_t size_class_; // My size class
2275 Span empty_; // Dummy header for list of empty spans
2276 Span nonempty_; // Dummy header for list of non-empty spans
2277 size_t counter_; // Number of free objects in cache entry
2278
2279 // Here we reserve space for TCEntry cache slots. Since one size class can
2280 // end up getting all the TCEntries quota in the system, we just preallocate
2281 // a sufficient number of entries here.
2282 TCEntry tc_slots_[kNumTransferEntries];
2283
2284 // Number of currently used cached entries in tc_slots_. This variable is
2285 // updated under a lock but can be read without one.
2286 int32_t used_slots_;
2287 // The current number of slots for this size class. This is an
2288 // adaptive value that is increased if there is lots of traffic
2289 // on a given size class.
2290 int32_t cache_size_;
2291};
2292
2293// Pad each CentralCache object to multiple of 64 bytes
2294class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
2295 private:
2296 char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
2297};
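// Padding each central free list to a multiple of 64 bytes keeps different
// size classes on separate cache lines (64 bytes being a typical cache-line
// size), so threads working on different size-class locks do not false-share
// the same line.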
2298
2299//-------------------------------------------------------------------
2300// Global variables
2301//-------------------------------------------------------------------
2302
2303// Central cache -- a collection of free-lists, one per size-class.
2304// We have a separate lock per free-list to reduce contention.
2305static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
2306
2307// Page-level allocator
2308static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
2309
2310#if PLATFORM(ARM)
2311static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)] __attribute__((aligned));
2312#else
2313static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];
2314#endif
2315static bool phinited = false;
2316
2317// Avoid extra level of indirection by making "pageheap" be just an alias
2318// of pageheap_memory.
2319typedef union {
2320 void* m_memory;
2321 TCMalloc_PageHeap* m_pageHeap;
2322} PageHeapUnion;
2323
2324static inline TCMalloc_PageHeap* getPageHeap()
2325{
2326 PageHeapUnion u = { &pageheap_memory[0] };
2327 return u.m_pageHeap;
2328}
2329
2330#define pageheap getPageHeap()
2331
2332#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2333
2334#if !HAVE(DISPATCH_H)
2335#if OS(WINDOWS)
2336static void sleep(unsigned seconds)
2337{
2338 ::Sleep(seconds * 1000);
2339}
2340#endif
2341
2342void TCMalloc_PageHeap::scavengerThread()
2343{
2344#if HAVE(PTHREAD_SETNAME_NP)
2345 pthread_setname_np("JavaScriptCore: FastMalloc scavenger");
2346#endif
2347
2348 while (1) {
2349 if (!shouldContinueScavenging()) {
2350 pthread_mutex_lock(&m_scavengeMutex);
2351 m_scavengeThreadActive = false;
2352 // Block until there are enough freed pages to release back to the system.
2353 pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
2354 m_scavengeThreadActive = true;
2355 pthread_mutex_unlock(&m_scavengeMutex);
2356 }
2357 sleep(kScavengeTimerDelayInSeconds);
2358 {
2359 SpinLockHolder h(&pageheap_lock);
2360 pageheap->scavenge();
2361 }
2362 }
2363}
2364
2365#else
2366
2367void TCMalloc_PageHeap::periodicScavenge()
2368{
2369 {
2370 SpinLockHolder h(&pageheap_lock);
2371 pageheap->scavenge();
2372 }
2373
2374 if (!shouldContinueScavenging()) {
2375 m_scavengingScheduled = false;
2376 dispatch_suspend(m_scavengeTimer);
2377 }
2378}
2379#endif // HAVE(DISPATCH_H)
2380
2381#endif
2382
2383// If TLS is available, we also store a copy
2384// of the per-thread object in a __thread variable
2385// since __thread variables are faster to read
2386// than pthread_getspecific(). We still need
2387// pthread_setspecific() because __thread
2388// variables provide no way to run cleanup
2389// code when a thread is destroyed.
2390#ifdef HAVE_TLS
2391static __thread TCMalloc_ThreadCache *threadlocal_heap;
2392#endif
2393// Thread-specific key. Initialization here is somewhat tricky
2394// because some Linux startup code invokes malloc() before it
2395// is in a good enough state to handle pthread_keycreate().
2396// Therefore, we use TSD keys only after tsd_inited is set to true.
2397// Until then, we use a slow path to get the heap object.
2398static bool tsd_inited = false;
2399static pthread_key_t heap_key;
2400#if COMPILER(MSVC)
2401DWORD tlsIndex = TLS_OUT_OF_INDEXES;
2402#endif
2403
2404static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
2405{
2406 // still do pthread_setspecific when using MSVC fast TLS to
2407 // benefit from the delete callback.
2408 pthread_setspecific(heap_key, heap);
2409#if COMPILER(MSVC)
2410 TlsSetValue(tlsIndex, heap);
2411#endif
2412}
2413
2414// Allocator for thread heaps
2415static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;
2416
2417// Linked list of heap objects. Protected by pageheap_lock.
2418static TCMalloc_ThreadCache* thread_heaps = NULL;
2419static int thread_heap_count = 0;
2420
2421// Overall thread cache size. Protected by pageheap_lock.
2422static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;
2423
2424// Global per-thread cache size. Writes are protected by
2425// pageheap_lock. Reads are done without any locking, which should be
2426// fine as long as size_t can be written atomically and we don't place
2427// invariants between this variable and other pieces of state.
2428static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
2429
2430//-------------------------------------------------------------------
2431// Central cache implementation
2432//-------------------------------------------------------------------
2433
2434void TCMalloc_Central_FreeList::Init(size_t cl) {
2435 lock_.Init();
2436 size_class_ = cl;
2437 DLL_Init(&empty_);
2438 DLL_Init(&nonempty_);
2439 counter_ = 0;
2440
2441 cache_size_ = 1;
2442 used_slots_ = 0;
2443 ASSERT(cache_size_ <= kNumTransferEntries);
2444}
2445
2446void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
2447 while (start) {
2448 void *next = SLL_Next(start);
2449 ReleaseToSpans(start);
2450 start = next;
2451 }
2452}
2453
2454ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
2455 const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
2456 Span* span = pageheap->GetDescriptor(p);
2457 ASSERT(span != NULL);
2458 ASSERT(span->refcount > 0);
2459
2460 // If span is empty, move it to non-empty list
2461 if (span->objects == NULL) {
2462 DLL_Remove(span);
2463 DLL_Prepend(&nonempty_, span);
2464 Event(span, 'N', 0);
2465 }
2466
2467 // The following check is expensive, so it is disabled by default
2468 if (false) {
2469 // Check that object does not occur in list
2470 unsigned got = 0;
2471 for (void* p = span->objects; p != NULL; p = *((void**) p)) {
2472 ASSERT(p != object);
2473 got++;
2474 }
2475 ASSERT(got + span->refcount ==
2476 (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
2477 }
2478
2479 counter_++;
2480 span->refcount--;
2481 if (span->refcount == 0) {
2482 Event(span, '#', 0);
2483 counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
2484 DLL_Remove(span);
2485
2486 // Release central list lock while operating on pageheap
2487 lock_.Unlock();
2488 {
2489 SpinLockHolder h(&pageheap_lock);
2490 pageheap->Delete(span);
2491 }
2492 lock_.Lock();
2493 } else {
2494 *(reinterpret_cast<void**>(object)) = span->objects;
2495 span->objects = object;
2496 }
2497}
2498
2499ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
2500 size_t locked_size_class, bool force) {
2501 static int race_counter = 0;
2502 int t = race_counter++; // Updated without a lock, but who cares.
2503 if (t >= static_cast<int>(kNumClasses)) {
2504 while (t >= static_cast<int>(kNumClasses)) {
2505 t -= kNumClasses;
2506 }
2507 race_counter = t;
2508 }
2509 ASSERT(t >= 0);
2510 ASSERT(t < static_cast<int>(kNumClasses));
2511 if (t == static_cast<int>(locked_size_class)) return false;
2512 return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
2513}
2514
2515bool TCMalloc_Central_FreeList::MakeCacheSpace() {
2516 // Is there room in the cache?
2517 if (used_slots_ < cache_size_) return true;
2518 // Check if we can expand this cache?
2519 if (cache_size_ == kNumTransferEntries) return false;
2520 // Ok, we'll try to grab an entry from some other size class.
2521 if (EvictRandomSizeClass(size_class_, false) ||
2522 EvictRandomSizeClass(size_class_, true)) {
2523 // Succeeded in evicting, we're going to make our cache larger.
2524 cache_size_++;
2525 return true;
2526 }
2527 return false;
2528}
2529
2530
2531namespace {
2532class LockInverter {
2533 private:
2534 SpinLock *held_, *temp_;
2535 public:
2536 inline explicit LockInverter(SpinLock* held, SpinLock *temp)
2537 : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
2538 inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
2539};
2540}
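// LockInverter swaps which of two spin locks is held for the duration of a
// scope: the constructor releases `held` and acquires `temp`, and the
// destructor undoes that in reverse order. ShrinkCache() below relies on this
// so that a thread never holds two size-class locks at the same time.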
2541
2542bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
2543 // Start with a quick check without taking a lock.
2544 if (cache_size_ == 0) return false;
2545 // We don't evict from a full cache unless we are 'forcing'.
2546 if (force == false && used_slots_ == cache_size_) return false;
2547
2548 // Grab lock, but first release the other lock held by this thread. We use
2549 // the lock inverter to ensure that we never hold two size class locks
2550 // concurrently. That can create a deadlock because there is no well
2551 // defined nesting order.
2552 LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
2553 ASSERT(used_slots_ <= cache_size_);
2554 ASSERT(0 <= cache_size_);
2555 if (cache_size_ == 0) return false;
2556 if (used_slots_ == cache_size_) {
2557 if (force == false) return false;
2558 // ReleaseListToSpans releases the lock, so we have to make all the
2559 // updates to the central list before calling it.
2560 cache_size_--;
2561 used_slots_--;
2562 ReleaseListToSpans(tc_slots_[used_slots_].head);
2563 return true;
2564 }
2565 cache_size_--;
2566 return true;
2567}
2568
2569void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
2570 SpinLockHolder h(&lock_);
2571 if (N == num_objects_to_move[size_class_] &&
2572 MakeCacheSpace()) {
2573 int slot = used_slots_++;
2574 ASSERT(slot >=0);
2575 ASSERT(slot < kNumTransferEntries);
2576 TCEntry *entry = &tc_slots_[slot];
2577 entry->head = start;
2578 entry->tail = end;
2579 return;
2580 }
2581 ReleaseListToSpans(start);
2582}
2583
2584void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
2585 int num = *N;
2586 ASSERT(num > 0);
2587
2588 SpinLockHolder h(&lock_);
2589 if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
2590 int slot = --used_slots_;
2591 ASSERT(slot >= 0);
2592 TCEntry *entry = &tc_slots_[slot];
2593 *start = entry->head;
2594 *end = entry->tail;
2595 return;
2596 }
2597
2598 // TODO: Prefetch multiple TCEntries?
2599 void *tail = FetchFromSpansSafe();
2600 if (!tail) {
2601 // We are completely out of memory.
2602 *start = *end = NULL;
2603 *N = 0;
2604 return;
2605 }
2606
2607 SLL_SetNext(tail, NULL);
2608 void *head = tail;
2609 int count = 1;
2610 while (count < num) {
2611 void *t = FetchFromSpans();
2612 if (!t) break;
2613 SLL_Push(&head, t);
2614 count++;
2615 }
2616 *start = head;
2617 *end = tail;
2618 *N = count;
2619}
2620
2621
2622void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
2623 void *t = FetchFromSpans();
2624 if (!t) {
2625 Populate();
2626 t = FetchFromSpans();
2627 }
2628 return t;
2629}
2630
2631void* TCMalloc_Central_FreeList::FetchFromSpans() {
2632 if (DLL_IsEmpty(&nonempty_)) return NULL;
2633 Span* span = nonempty_.next;
2634
2635 ASSERT(span->objects != NULL);
2636 ASSERT_SPAN_COMMITTED(span);
2637 span->refcount++;
2638 void* result = span->objects;
2639 span->objects = *(reinterpret_cast<void**>(result));
2640 if (span->objects == NULL) {
2641 // Move to empty list
2642 DLL_Remove(span);
2643 DLL_Prepend(&empty_, span);
2644 Event(span, 'E', 0);
2645 }
2646 counter_--;
2647 return result;
2648}
2649
2650// Fetch memory from the system and add to the central cache freelist.
2651ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
2652 // Release central list lock while operating on pageheap
2653 lock_.Unlock();
2654 const size_t npages = class_to_pages[size_class_];
2655
2656 Span* span;
2657 {
2658 SpinLockHolder h(&pageheap_lock);
2659 span = pageheap->New(npages);
2660 if (span) pageheap->RegisterSizeClass(span, size_class_);
2661 }
2662 if (span == NULL) {
2663 MESSAGE("allocation failed: %d\n", errno);
2664 lock_.Lock();
2665 return;
2666 }
2667 ASSERT_SPAN_COMMITTED(span);
2668 ASSERT(span->length == npages);
2669 // Cache sizeclass info eagerly. Locking is not necessary.
2670 // (Instead of being eager, we could just replace any stale info
2671 // about this span, but that seems to be no better in practice.)
2672 for (size_t i = 0; i < npages; i++) {
2673 pageheap->CacheSizeClass(span->start + i, size_class_);
2674 }
2675
2676 // Split the block into pieces and add to the free-list
2677 // TODO: coloring of objects to avoid cache conflicts?
2678 void** tail = &span->objects;
2679 char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
2680 char* limit = ptr + (npages << kPageShift);
2681 const size_t size = ByteSizeForClass(size_class_);
2682 int num = 0;
2683 char* nptr;
2684 while ((nptr = ptr + size) <= limit) {
2685 *tail = ptr;
2686 tail = reinterpret_cast<void**>(ptr);
2687 ptr = nptr;
2688 num++;
2689 }
2690 ASSERT(ptr <= limit);
2691 *tail = NULL;
2692 span->refcount = 0; // No sub-object in use yet
2693
2694 // Add span to list of non-empty spans
2695 lock_.Lock();
2696 DLL_Prepend(&nonempty_, span);
2697 counter_ += num;
2698}
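// The carving loop above threads a singly linked free list through the new
// span itself: each object's first word points at the next object, so a span
// of npages pages yields floor((npages << kPageShift) / size) objects without
// any separate bookkeeping allocation.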
2699
2700//-------------------------------------------------------------------
2701// TCMalloc_ThreadCache implementation
2702//-------------------------------------------------------------------
2703
2704inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
2705 if (bytes_until_sample_ < k) {
2706 PickNextSample(k);
2707 return true;
2708 } else {
2709 bytes_until_sample_ -= k;
2710 return false;
2711 }
2712}
2713
2714void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
2715 size_ = 0;
2716 next_ = NULL;
2717 prev_ = NULL;
2718 tid_ = tid;
2719 in_setspecific_ = false;
2720 for (size_t cl = 0; cl < kNumClasses; ++cl) {
2721 list_[cl].Init();
2722 }
2723
2724 // Initialize RNG -- run it for a bit to get to good values
2725 bytes_until_sample_ = 0;
2726 rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
2727 for (int i = 0; i < 100; i++) {
2728 PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
2729 }
2730}
2731
2732void TCMalloc_ThreadCache::Cleanup() {
2733 // Put unused memory back into central cache
2734 for (size_t cl = 0; cl < kNumClasses; ++cl) {
2735 if (list_[cl].length() > 0) {
2736 ReleaseToCentralCache(cl, list_[cl].length());
2737 }
2738 }
2739}
2740
2741ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
2742 ASSERT(size <= kMaxSize);
2743 const size_t cl = SizeClass(size);
2744 FreeList* list = &list_[cl];
2745 size_t allocationSize = ByteSizeForClass(cl);
2746 if (list->empty()) {
2747 FetchFromCentralCache(cl, allocationSize);
2748 if (list->empty()) return NULL;
2749 }
2750 size_ -= allocationSize;
2751 return list->Pop();
2752}
2753
2754inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
2755 size_ += ByteSizeForClass(cl);
2756 FreeList* list = &list_[cl];
2757 list->Push(ptr);
2758 // If enough data is free, put back into central cache
2759 if (list->length() > kMaxFreeListLength) {
2760 ReleaseToCentralCache(cl, num_objects_to_move[cl]);
2761 }
2762 if (size_ >= per_thread_cache_size) Scavenge();
2763}
2764
2765// Remove some objects of class "cl" from central cache and add to thread heap
2766ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
2767 int fetch_count = num_objects_to_move[cl];
2768 void *start, *end;
2769 central_cache[cl].RemoveRange(&start, &end, &fetch_count);
2770 list_[cl].PushRange(fetch_count, start, end);
2771 size_ += allocationSize * fetch_count;
2772}
2773
2774// Remove some objects of class "cl" from thread heap and add to central cache
2775inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
2776 ASSERT(N > 0);
2777 FreeList* src = &list_[cl];
2778 if (N > src->length()) N = src->length();
2779 size_ -= N*ByteSizeForClass(cl);
2780
2781 // We return prepackaged chains of the correct size to the central cache.
2782 // TODO: Use the same format internally in the thread caches?
2783 int batch_size = num_objects_to_move[cl];
2784 while (N > batch_size) {
2785 void *tail, *head;
2786 src->PopRange(batch_size, &head, &tail);
2787 central_cache[cl].InsertRange(head, tail, batch_size);
2788 N -= batch_size;
2789 }
2790 void *tail, *head;
2791 src->PopRange(N, &head, &tail);
2792 central_cache[cl].InsertRange(head, tail, N);
2793}
2794
2795// Release idle memory to the central cache
2796inline void TCMalloc_ThreadCache::Scavenge() {
2797 // If the low-water mark for the free list is L, it means we would
2798 // not have had to allocate anything from the central cache even if
2799 // we had reduced the free list size by L. We aim to get closer to
2800 // that situation by dropping L/2 nodes from the free list. This
2801 // may not release much memory, but if so we will call scavenge again
2802 // pretty soon and the low-water marks will be high on that call.
2803 //int64 start = CycleClock::Now();
2804
2805 for (size_t cl = 0; cl < kNumClasses; cl++) {
2806 FreeList* list = &list_[cl];
2807 const int lowmark = list->lowwatermark();
2808 if (lowmark > 0) {
2809 const int drop = (lowmark > 1) ? lowmark/2 : 1;
2810 ReleaseToCentralCache(cl, drop);
2811 }
2812 list->clear_lowwatermark();
2813 }
2814
2815 //int64 finish = CycleClock::Now();
2816 //CycleTimer ct;
2817 //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
2818}
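// Worked example of the heuristic above: if a free list's low-water mark
// since the previous Scavenge() is 8, the thread never needed those 8
// objects, so returning 8/2 = 4 of them to the central cache is unlikely to
// force a refill; repeated calls keep halving the unused surplus.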
2819
2820void TCMalloc_ThreadCache::PickNextSample(size_t k) {
2821 // Make next "random" number
2822 // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
2823 static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
2824 uint32_t r = rnd_;
2825 rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
2826
2827 // Next point is "rnd_ % (sample_period)". I.e., average
2828 // increment is "sample_period/2".
2829 const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
2830 static int last_flag_value = -1;
2831
2832 if (flag_value != last_flag_value) {
2833 SpinLockHolder h(&sample_period_lock);
2834 int i;
2835 for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
2836 if (primes_list[i] >= flag_value) {
2837 break;
2838 }
2839 }
2840 sample_period = primes_list[i];
2841 last_flag_value = flag_value;
2842 }
2843
2844 bytes_until_sample_ += rnd_ % sample_period;
2845
2846 if (k > (static_cast<size_t>(-1) >> 2)) {
2847 // If the user has asked for a huge allocation then it is possible
2848 // for the code below to loop infinitely. Just return (note that
2849 // this throws off the sampling accuracy somewhat, but a user who
2850 // is allocating more than 1G of memory at a time can live with a
2851 // minor inaccuracy in profiling of small allocations, and also
2852 // would rather not wait for the loop below to terminate).
2853 return;
2854 }
2855
2856 while (bytes_until_sample_ < k) {
2857 // Increase bytes_until_sample_ by enough average sampling periods
2858 // (sample_period >> 1) to allow us to sample past the current
2859 // allocation.
2860 bytes_until_sample_ += (sample_period >> 1);
2861 }
2862
2863 bytes_until_sample_ -= k;
2864}
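// Sampling sketch: rnd_ is advanced by a 32-bit linear-feedback shift
// register built from kPoly, and bytes_until_sample_ grows by
// rnd_ % sample_period (about sample_period / 2 on average, as noted above),
// so allocations are sampled at a roughly fixed byte rate; the trailing loop
// only tops the counter up when a single request exceeds what is left.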
2865
2866void TCMalloc_ThreadCache::InitModule() {
2867 // There is a slight potential race here because of double-checked
2868 // locking idiom. However, as long as the program does a small
2869 // allocation before switching to multi-threaded mode, we will be
2870 // fine. We increase the chances of doing such a small allocation
2871 // by doing one in the constructor of the module_enter_exit_hook
2872 // object declared below.
2873 SpinLockHolder h(&pageheap_lock);
2874 if (!phinited) {
2875#ifdef WTF_CHANGES
2876 InitTSD();
2877#endif
2878 InitSizeClasses();
2879 threadheap_allocator.Init();
2880 span_allocator.Init();
2881 span_allocator.New(); // Reduce cache conflicts
2882 span_allocator.New(); // Reduce cache conflicts
2883 stacktrace_allocator.Init();
2884 DLL_Init(&sampled_objects);
2885 for (size_t i = 0; i < kNumClasses; ++i) {
2886 central_cache[i].Init(i);
2887 }
2888 pageheap->init();
2889 phinited = 1;
2890#if defined(WTF_CHANGES) && OS(DARWIN)
2891 FastMallocZone::init();
2892#endif
2893 }
2894}
2895
2896inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
2897 // Create the heap and add it to the linked list
2898 TCMalloc_ThreadCache *heap = threadheap_allocator.New();
2899 heap->Init(tid);
2900 heap->next_ = thread_heaps;
2901 heap->prev_ = NULL;
2902 if (thread_heaps != NULL) thread_heaps->prev_ = heap;
2903 thread_heaps = heap;
2904 thread_heap_count++;
2905 RecomputeThreadCacheSize();
2906 return heap;
2907}
2908
2909inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
2910#ifdef HAVE_TLS
2911 // __thread is faster, but only when the kernel supports it
2912 if (KernelSupportsTLS())
2913 return threadlocal_heap;
2914#elif COMPILER(MSVC)
2915 return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
2916#else
2917 return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
2918#endif
2919}
2920
2921inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
2922 TCMalloc_ThreadCache* ptr = NULL;
2923 if (!tsd_inited) {
2924 InitModule();
2925 } else {
2926 ptr = GetThreadHeap();
2927 }
2928 if (ptr == NULL) ptr = CreateCacheIfNecessary();
2929 return ptr;
2930}
2931
2932// In deletion paths, we do not try to create a thread-cache. This is
2933// because we may be in the thread destruction code and may have
2934// already cleaned up the cache for this thread.
2935inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
2936 if (!tsd_inited) return NULL;
2937 void* const p = GetThreadHeap();
2938 return reinterpret_cast<TCMalloc_ThreadCache*>(p);
2939}
2940
2941void TCMalloc_ThreadCache::InitTSD() {
2942 ASSERT(!tsd_inited);
2943 pthread_key_create(&heap_key, DestroyThreadCache);
2944#if COMPILER(MSVC)
2945 tlsIndex = TlsAlloc();
2946#endif
2947 tsd_inited = true;
2948
2949#if !COMPILER(MSVC)
2950 // We may have used a fake pthread_t for the main thread. Fix it.
2951 pthread_t zero;
2952 memset(&zero, 0, sizeof(zero));
2953#endif
2954#ifndef WTF_CHANGES
2955 SpinLockHolder h(&pageheap_lock);
2956#else
2957 ASSERT(pageheap_lock.IsHeld());
2958#endif
2959 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
2960#if COMPILER(MSVC)
2961 if (h->tid_ == 0) {
2962 h->tid_ = GetCurrentThreadId();
2963 }
2964#else
2965 if (pthread_equal(h->tid_, zero)) {
2966 h->tid_ = pthread_self();
2967 }
2968#endif
2969 }
2970}
2971
2972TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
2973 // Initialize per-thread data if necessary
2974 TCMalloc_ThreadCache* heap = NULL;
2975 {
2976 SpinLockHolder h(&pageheap_lock);
2977
2978#if COMPILER(MSVC)
2979 DWORD me;
2980 if (!tsd_inited) {
2981 me = 0;
2982 } else {
2983 me = GetCurrentThreadId();
2984 }
2985#else
2986 // Early on in glibc's life, we cannot even call pthread_self()
2987 pthread_t me;
2988 if (!tsd_inited) {
2989 memset(&me, 0, sizeof(me));
2990 } else {
2991 me = pthread_self();
2992 }
2993#endif
2994
2995 // This may be a recursive malloc call from pthread_setspecific()
2996 // In that case, the heap for this thread has already been created
2997 // and added to the linked list. So we search for that first.
2998 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
2999#if COMPILER(MSVC)
3000 if (h->tid_ == me) {
3001#else
3002 if (pthread_equal(h->tid_, me)) {
3003#endif
3004 heap = h;
3005 break;
3006 }
3007 }
3008
3009 if (heap == NULL) heap = NewHeap(me);
3010 }
3011
3012 // We call pthread_setspecific() outside the lock because it may
3013 // call malloc() recursively. The recursive call will never get
3014 // here again because it will find the already allocated heap in the
3015 // linked list of heaps.
3016 if (!heap->in_setspecific_ && tsd_inited) {
3017 heap->in_setspecific_ = true;
3018 setThreadHeap(heap);
3019 }
3020 return heap;
3021}
3022
3023void TCMalloc_ThreadCache::BecomeIdle() {
3024 if (!tsd_inited) return; // No caches yet
3025 TCMalloc_ThreadCache* heap = GetThreadHeap();
3026 if (heap == NULL) return; // No thread cache to remove
3027 if (heap->in_setspecific_) return; // Do not disturb the active caller
3028
3029 heap->in_setspecific_ = true;
3030 pthread_setspecific(heap_key, NULL);
3031#ifdef HAVE_TLS
3032 // Also update the copy in __thread
3033 threadlocal_heap = NULL;
3034#endif
3035 heap->in_setspecific_ = false;
3036 if (GetThreadHeap() == heap) {
3037 // Somehow heap got reinstated by a recursive call to malloc
3038 // from pthread_setspecific. We give up in this case.
3039 return;
3040 }
3041
3042 // We can now get rid of the heap
3043 DeleteCache(heap);
3044}
3045
3046void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
3047 // Note that "ptr" cannot be NULL since pthread promises not
3048 // to invoke the destructor on NULL values, but for safety,
3049 // we check anyway.
3050 if (ptr == NULL) return;
3051#ifdef HAVE_TLS
3052 // Prevent fast path of GetThreadHeap() from returning heap.
3053 threadlocal_heap = NULL;
3054#endif
3055 DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
3056}
3057
3058void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
3059 // Remove all memory from heap
3060 heap->Cleanup();
3061
3062 // Remove from linked list
3063 SpinLockHolder h(&pageheap_lock);
3064 if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
3065 if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
3066 if (thread_heaps == heap) thread_heaps = heap->next_;
3067 thread_heap_count--;
3068 RecomputeThreadCacheSize();
3069
3070 threadheap_allocator.Delete(heap);
3071}
3072
3073void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
3074 // Divide available space across threads
3075 int n = thread_heap_count > 0 ? thread_heap_count : 1;
3076 size_t space = overall_thread_cache_size / n;
3077
3078 // Limit to allowed range
3079 if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
3080 if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;
3081
3082 per_thread_cache_size = space;
3083}
3084
3085void TCMalloc_ThreadCache::Print() const {
3086 for (size_t cl = 0; cl < kNumClasses; ++cl) {
3087 MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n",
3088 ByteSizeForClass(cl),
3089 list_[cl].length(),
3090 list_[cl].lowwatermark());
3091 }
3092}
3093
3094// Extract interesting stats
3095struct TCMallocStats {
3096 uint64_t system_bytes; // Bytes alloced from system
3097 uint64_t thread_bytes; // Bytes in thread caches
3098 uint64_t central_bytes; // Bytes in central cache
3099 uint64_t transfer_bytes; // Bytes in central transfer cache
3100 uint64_t pageheap_bytes; // Bytes in page heap
3101 uint64_t metadata_bytes; // Bytes alloced for metadata
3102};
3103
3104#ifndef WTF_CHANGES
3105// Get stats into "r". Also get per-size-class counts if class_count != NULL
3106static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
3107 r->central_bytes = 0;
3108 r->transfer_bytes = 0;
3109 for (int cl = 0; cl < kNumClasses; ++cl) {
3110 const int length = central_cache[cl].length();
3111 const int tc_length = central_cache[cl].tc_length();
3112 r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
3113 r->transfer_bytes +=
3114 static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
3115 if (class_count) class_count[cl] = length + tc_length;
3116 }
3117
3118 // Add stats from per-thread heaps
3119 r->thread_bytes = 0;
3120 { // scope
3121 SpinLockHolder h(&pageheap_lock);
3122 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
3123 r->thread_bytes += h->Size();
3124 if (class_count) {
3125 for (size_t cl = 0; cl < kNumClasses; ++cl) {
3126 class_count[cl] += h->freelist_length(cl);
3127 }
3128 }
3129 }
3130 }
3131
3132 { //scope
3133 SpinLockHolder h(&pageheap_lock);
3134 r->system_bytes = pageheap->SystemBytes();
3135 r->metadata_bytes = metadata_system_bytes;
3136 r->pageheap_bytes = pageheap->FreeBytes();
3137 }
3138}
3139#endif
3140
3141#ifndef WTF_CHANGES
3142// WRITE stats to "out"
3143static void DumpStats(TCMalloc_Printer* out, int level) {
3144 TCMallocStats stats;
3145 uint64_t class_count[kNumClasses];
3146 ExtractStats(&stats, (level >= 2 ? class_count : NULL));
3147
3148 if (level >= 2) {
3149 out->printf("------------------------------------------------\n");
3150 uint64_t cumulative = 0;
3151 for (int cl = 0; cl < kNumClasses; ++cl) {
3152 if (class_count[cl] > 0) {
3153 uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
3154 cumulative += class_bytes;
3155 out->printf("class %3d [ %8" PRIuS " bytes ] : "
3156 "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
3157 cl, ByteSizeForClass(cl),
3158 class_count[cl],
3159 class_bytes / 1048576.0,
3160 cumulative / 1048576.0);
3161 }
3162 }
3163
3164 SpinLockHolder h(&pageheap_lock);
3165 pageheap->Dump(out);
3166 }
3167
3168 const uint64_t bytes_in_use = stats.system_bytes
3169 - stats.pageheap_bytes
3170 - stats.central_bytes
3171 - stats.transfer_bytes
3172 - stats.thread_bytes;
3173
3174 out->printf("------------------------------------------------\n"
3175 "MALLOC: %12" PRIu64 " Heap size\n"
3176 "MALLOC: %12" PRIu64 " Bytes in use by application\n"
3177 "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
3178 "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
3179 "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
3180 "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
3181 "MALLOC: %12" PRIu64 " Spans in use\n"
3182 "MALLOC: %12" PRIu64 " Thread heaps in use\n"
3183 "MALLOC: %12" PRIu64 " Metadata allocated\n"
3184 "------------------------------------------------\n",
3185 stats.system_bytes,
3186 bytes_in_use,
3187 stats.pageheap_bytes,
3188 stats.central_bytes,
3189 stats.transfer_bytes,
3190 stats.thread_bytes,
3191 uint64_t(span_allocator.inuse()),
3192 uint64_t(threadheap_allocator.inuse()),
3193 stats.metadata_bytes);
3194}
3195
3196static void PrintStats(int level) {
3197 const int kBufferSize = 16 << 10;
3198 char* buffer = new char[kBufferSize];
3199 TCMalloc_Printer printer(buffer, kBufferSize);
3200 DumpStats(&printer, level);
3201 write(STDERR_FILENO, buffer, strlen(buffer));
3202 delete[] buffer;
3203}
3204
3205static void** DumpStackTraces() {
3206 // Count how much space we need
3207 int needed_slots = 0;
3208 {
3209 SpinLockHolder h(&pageheap_lock);
3210 for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
3211 StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
3212 needed_slots += 3 + stack->depth;
3213 }
3214 needed_slots += 100; // Slop in case sample grows
3215 needed_slots += needed_slots/8; // An extra 12.5% slop
3216 }
3217
3218 void** result = new void*[needed_slots];
3219 if (result == NULL) {
3220 MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
3221 needed_slots);
3222 return NULL;
3223 }
3224
3225 SpinLockHolder h(&pageheap_lock);
3226 int used_slots = 0;
3227 for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
3228 ASSERT(used_slots < needed_slots); // Need to leave room for terminator
3229 StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
3230 if (used_slots + 3 + stack->depth >= needed_slots) {
3231 // No more room
3232 break;
3233 }
3234
3235 result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
3236 result[used_slots+1] = reinterpret_cast<void*>(stack->size);
3237 result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
3238 for (int d = 0; d < stack->depth; d++) {
3239 result[used_slots+3+d] = stack->stack[d];
3240 }
3241 used_slots += 3 + stack->depth;
3242 }
3243 result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
3244 return result;
3245}
3246#endif
3247
3248#ifndef WTF_CHANGES
3249
3250// TCMalloc's support for extra malloc interfaces
3251class TCMallocImplementation : public MallocExtension {
3252 public:
3253 virtual void GetStats(char* buffer, int buffer_length) {
3254 ASSERT(buffer_length > 0);
3255 TCMalloc_Printer printer(buffer, buffer_length);
3256
3257 // Print level one stats unless lots of space is available
3258 if (buffer_length < 10000) {
3259 DumpStats(&printer, 1);
3260 } else {
3261 DumpStats(&printer, 2);
3262 }
3263 }
3264
3265 virtual void** ReadStackTraces() {
3266 return DumpStackTraces();
3267 }
3268
3269 virtual bool GetNumericProperty(const char* name, size_t* value) {
3270 ASSERT(name != NULL);
3271
3272 if (strcmp(name, "generic.current_allocated_bytes") == 0) {
3273 TCMallocStats stats;
3274 ExtractStats(&stats, NULL);
3275 *value = stats.system_bytes
3276 - stats.thread_bytes
3277 - stats.central_bytes
3278 - stats.pageheap_bytes;
3279 return true;
3280 }
3281
3282 if (strcmp(name, "generic.heap_size") == 0) {
3283 TCMallocStats stats;
3284 ExtractStats(&stats, NULL);
3285 *value = stats.system_bytes;
3286 return true;
3287 }
3288
3289 if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
3290 // We assume that bytes in the page heap are not fragmented too
3291 // badly, and are therefore available for allocation.
3292 SpinLockHolder l(&pageheap_lock);
3293 *value = pageheap->FreeBytes();
3294 return true;
3295 }
3296
3297 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
3298 SpinLockHolder l(&pageheap_lock);
3299 *value = overall_thread_cache_size;
3300 return true;
3301 }
3302
3303 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
3304 TCMallocStats stats;
3305 ExtractStats(&stats, NULL);
3306 *value = stats.thread_bytes;
3307 return true;
3308 }
3309
3310 return false;
3311 }
3312
3313 virtual bool SetNumericProperty(const char* name, size_t value) {
3314 ASSERT(name != NULL);
3315
3316 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
3317 // Clip the value to a reasonable range
3318 if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
3319 if (value > (1<<30)) value = (1<<30); // Limit to 1GB
3320
3321 SpinLockHolder l(&pageheap_lock);
3322 overall_thread_cache_size = static_cast<size_t>(value);
3323 TCMalloc_ThreadCache::RecomputeThreadCacheSize();
3324 return true;
3325 }
3326
3327 return false;
3328 }
3329
3330 virtual void MarkThreadIdle() {
3331 TCMalloc_ThreadCache::BecomeIdle();
3332 }
3333
3334 virtual void ReleaseFreeMemory() {
3335 SpinLockHolder h(&pageheap_lock);
3336 pageheap->ReleaseFreePages();
3337 }
3338};
3339#endif
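// Illustrative (non-WebKit) use of the extension interface above, assuming
// the stock tcmalloc distribution where the MallocExtension singleton and
// the <google/malloc_extension.h> header exist; sketch only, not compiled
// here.
#if 0
#include <stdio.h>
#include <google/malloc_extension.h>

static void ReportHeapUsage() {
  size_t allocated = 0;
  size_t heapSize = 0;
  if (MallocExtension::instance()->GetNumericProperty("generic.current_allocated_bytes", &allocated) &&
      MallocExtension::instance()->GetNumericProperty("generic.heap_size", &heapSize)) {
    fprintf(stderr, "tcmalloc: %zu bytes live out of a %zu byte heap\n", allocated, heapSize);
  }
  // Cap the combined thread caches at 32 MB; the setter clips to a sane range.
  MallocExtension::instance()->SetNumericProperty("tcmalloc.max_total_thread_cache_bytes", 32 << 20);
}
#endif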
3340
3341// The constructor allocates an object to ensure that initialization
3342// runs before main(), and therefore we do not have a chance to become
3343// multi-threaded before initialization. We also create the TSD key
3344// here. Presumably by the time this constructor runs, glibc is in
3345// good enough shape to handle pthread_key_create().
3346//
3347// The constructor also takes the opportunity to tell STL to use
3348 // tcmalloc. We want to do this early, before construction time, so
3349// all user STL allocations go through tcmalloc (which works really
3350// well for STL).
3351//
3352// The destructor prints stats when the program exits.
3353class TCMallocGuard {
3354 public:
3355
3356 TCMallocGuard() {
3357#ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS
3358 // Check whether the kernel also supports TLS (needs to happen at runtime)
3359 CheckIfKernelSupportsTLS();
3360#endif
3361#ifndef WTF_CHANGES
3362#ifdef WIN32 // patch the windows VirtualAlloc, etc.
3363 PatchWindowsFunctions(); // defined in windows/patch_functions.cc
3364#endif
3365#endif
3366 free(malloc(1));
3367 TCMalloc_ThreadCache::InitTSD();
3368 free(malloc(1));
3369#ifndef WTF_CHANGES
3370 MallocExtension::Register(new TCMallocImplementation);
3371#endif
3372 }
3373
3374#ifndef WTF_CHANGES
3375 ~TCMallocGuard() {
3376 const char* env = getenv("MALLOCSTATS");
3377 if (env != NULL) {
3378 int level = atoi(env);
3379 if (level < 1) level = 1;
3380 PrintStats(level);
3381 }
3382#ifdef WIN32
3383 UnpatchWindowsFunctions();
3384#endif
3385 }
3386#endif
3387};
3388
3389#ifndef WTF_CHANGES
3390static TCMallocGuard module_enter_exit_hook;
3391#endif
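// The guard above relies on a standard C++ idiom: a namespace-scope object
// of a class with a non-trivial constructor is constructed during static
// initialization, i.e. before main(). A minimal sketch of the same idiom
// (EarlyInit is hypothetical):
#if 0
struct EarlyInit {
  EarlyInit() { TCMalloc_ThreadCache::InitTSD(); }  // runs before main()
};
static EarlyInit early_init_hook;
#endif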
3392
3393
3394//-------------------------------------------------------------------
3395// Helpers for the exported routines below
3396//-------------------------------------------------------------------
3397
3398#ifndef WTF_CHANGES
3399
3400static Span* DoSampledAllocation(size_t size) {
3401
3402 // Grab the stack trace outside the heap lock
3403 StackTrace tmp;
3404 tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
3405 tmp.size = size;
3406
3407 SpinLockHolder h(&pageheap_lock);
3408 // Allocate span
3409 Span *span = pageheap->New(pages(size == 0 ? 1 : size));
3410 if (span == NULL) {
3411 return NULL;
3412 }
3413
3414 // Allocate stack trace
3415 StackTrace *stack = stacktrace_allocator.New();
3416 if (stack == NULL) {
3417 // Sampling failed because of lack of memory
3418 return span;
3419 }
3420
3421 *stack = tmp;
3422 span->sample = 1;
3423 span->objects = stack;
3424 DLL_Prepend(&sampled_objects, span);
3425
3426 return span;
3427}
3428#endif
3429
3430static inline bool CheckCachedSizeClass(void *ptr) {
3431 PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
3432 size_t cached_value = pageheap->GetSizeClassIfCached(p);
3433 return cached_value == 0 ||
3434 cached_value == pageheap->GetDescriptor(p)->sizeclass;
3435}
3436
3437static inline void* CheckedMallocResult(void *result)
3438{
3439 ASSERT(result == 0 || CheckCachedSizeClass(result));
3440 return result;
3441}
3442
3443static inline void* SpanToMallocResult(Span *span) {
9dae56ea 3444 ASSERT_SPAN_COMMITTED(span);
b37bf2e1
A
3445 pageheap->CacheSizeClass(span->start, 0);
3446 return
3447 CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
3448}
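// Illustrative arithmetic for the cache interplay above, assuming 4 KB pages
// (kPageShift == 12): a pointer such as 0x2a3456f8 lives on PageID
// 0x2a3456f8 >> 12 == 0x2a345. CacheSizeClass(span->start, 0) records "no
// cached size class" for that page, which is why CheckCachedSizeClass()
// treats a cached value of 0 as a pass rather than as size class zero.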
3449
9dae56ea
A
3450#ifdef WTF_CHANGES
3451template <bool crashOnFailure>
3452#endif
b37bf2e1
A
3453static ALWAYS_INLINE void* do_malloc(size_t size) {
3454 void* ret = NULL;
3455
3456#ifdef WTF_CHANGES
3457 ASSERT(!isForbidden());
3458#endif
3459
3460 // The following call forces module initialization
3461 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
3462#ifndef WTF_CHANGES
3463 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
3464 Span* span = DoSampledAllocation(size);
3465 if (span != NULL) {
3466 ret = SpanToMallocResult(span);
3467 }
3468 } else
3469#endif
3470 if (size > kMaxSize) {
3471 // Use page-level allocator
3472 SpinLockHolder h(&pageheap_lock);
3473 Span* span = pageheap->New(pages(size));
3474 if (span != NULL) {
3475 ret = SpanToMallocResult(span);
3476 }
3477 } else {
3478 // The common case, and also the simplest. This just pops the
3479    // size-appropriate freelist, after replenishing it if it's empty.
3480 ret = CheckedMallocResult(heap->Allocate(size));
3481 }
9dae56ea
A
3482 if (!ret) {
3483#ifdef WTF_CHANGES
3484 if (crashOnFailure) // This branch should be optimized out by the compiler.
3485 CRASH();
3486#else
3487 errno = ENOMEM;
3488#endif
3489 }
b37bf2e1
A
3490 return ret;
3491}
3492
3493static ALWAYS_INLINE void do_free(void* ptr) {
3494 if (ptr == NULL) return;
3495 ASSERT(pageheap != NULL); // Should not call free() before malloc()
3496 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
3497 Span* span = NULL;
3498 size_t cl = pageheap->GetSizeClassIfCached(p);
3499
3500 if (cl == 0) {
3501 span = pageheap->GetDescriptor(p);
3502 cl = span->sizeclass;
3503 pageheap->CacheSizeClass(p, cl);
3504 }
3505 if (cl != 0) {
9dae56ea 3506#ifndef NO_TCMALLOC_SAMPLES
b37bf2e1 3507 ASSERT(!pageheap->GetDescriptor(p)->sample);
9dae56ea 3508#endif
b37bf2e1
A
3509 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
3510 if (heap != NULL) {
3511 heap->Deallocate(ptr, cl);
3512 } else {
3513 // Delete directly into central cache
3514 SLL_SetNext(ptr, NULL);
3515 central_cache[cl].InsertRange(ptr, ptr, 1);
3516 }
3517 } else {
3518 SpinLockHolder h(&pageheap_lock);
3519 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
3520 ASSERT(span != NULL && span->start == p);
9dae56ea 3521#ifndef NO_TCMALLOC_SAMPLES
b37bf2e1
A
3522 if (span->sample) {
3523 DLL_Remove(span);
3524 stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
3525 span->objects = NULL;
3526 }
9dae56ea 3527#endif
b37bf2e1
A
3528 pageheap->Delete(span);
3529 }
3530}
3531
3532#ifndef WTF_CHANGES
3533// For use by exported routines below that want specific alignments
3534//
3535// Note: this code can be slow, and can significantly fragment memory.
3536// The expectation is that memalign/posix_memalign/valloc/pvalloc will
3537// not be invoked very often. This requirement simplifies our
3538// implementation and allows us to tune for expected allocation
3539// patterns.
3540static void* do_memalign(size_t align, size_t size) {
3541 ASSERT((align & (align - 1)) == 0);
3542 ASSERT(align > 0);
3543 if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
3544
3545 // Allocate at least one byte to avoid boundary conditions below
3546 if (size == 0) size = 1;
3547
3548 if (size <= kMaxSize && align < kPageSize) {
3549 // Search through acceptable size classes looking for one with
3550 // enough alignment. This depends on the fact that
3551 // InitSizeClasses() currently produces several size classes that
3552 // are aligned at powers of two. We will waste time and space if
3553 // we miss in the size class array, but that is deemed acceptable
3554 // since memalign() should be used rarely.
3555 size_t cl = SizeClass(size);
3556 while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
3557 cl++;
3558 }
3559 if (cl < kNumClasses) {
3560 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
3561 return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
3562 }
3563 }
3564
3565 // We will allocate directly from the page heap
3566 SpinLockHolder h(&pageheap_lock);
3567
3568 if (align <= kPageSize) {
3569 // Any page-level allocation will be fine
3570 // TODO: We could put the rest of this page in the appropriate
3571 // TODO: cache but it does not seem worth it.
3572 Span* span = pageheap->New(pages(size));
3573 return span == NULL ? NULL : SpanToMallocResult(span);
3574 }
3575
3576 // Allocate extra pages and carve off an aligned portion
3577 const Length alloc = pages(size + align);
3578 Span* span = pageheap->New(alloc);
3579 if (span == NULL) return NULL;
3580
3581 // Skip starting portion so that we end up aligned
3582 Length skip = 0;
3583 while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
3584 skip++;
3585 }
3586 ASSERT(skip < alloc);
3587 if (skip > 0) {
3588 Span* rest = pageheap->Split(span, skip);
3589 pageheap->Delete(span);
3590 span = rest;
3591 }
3592
3593 // Skip trailing portion that we do not need to return
3594 const Length needed = pages(size);
3595 ASSERT(span->length >= needed);
3596 if (span->length > needed) {
3597 Span* trailer = pageheap->Split(span, needed);
3598 pageheap->Delete(trailer);
3599 }
3600 return SpanToMallocResult(span);
3601}
3602#endif
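// The page-carving path above is essentially an align-up computation carried
// out one page at a time by the "skip" loop. A minimal sketch of the same
// rounding on raw addresses (illustrative only; align must be a power of
// two, as asserted above):
#if 0
static inline uintptr_t AlignUp(uintptr_t address, uintptr_t align) {
  return (address + align - 1) & ~(align - 1);
}
// e.g. AlignUp(0x12345, 0x1000) == 0x13000.
#endif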
3603
3604// Helpers for use by exported routines below:
3605
3606#ifndef WTF_CHANGES
3607static inline void do_malloc_stats() {
3608 PrintStats(1);
3609}
3610#endif
3611
3612static inline int do_mallopt(int, int) {
3613 return 1; // Indicates error
3614}
3615
3616#ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance
3617static inline struct mallinfo do_mallinfo() {
3618 TCMallocStats stats;
3619 ExtractStats(&stats, NULL);
3620
3621 // Just some of the fields are filled in.
3622 struct mallinfo info;
3623 memset(&info, 0, sizeof(info));
3624
3625 // Unfortunately, the struct contains "int" field, so some of the
3626 // size values will be truncated.
3627 info.arena = static_cast<int>(stats.system_bytes);
3628 info.fsmblks = static_cast<int>(stats.thread_bytes
3629 + stats.central_bytes
3630 + stats.transfer_bytes);
3631 info.fordblks = static_cast<int>(stats.pageheap_bytes);
3632 info.uordblks = static_cast<int>(stats.system_bytes
3633 - stats.thread_bytes
3634 - stats.central_bytes
3635 - stats.transfer_bytes
3636 - stats.pageheap_bytes);
3637
3638 return info;
3639}
3640#endif
3641
3642//-------------------------------------------------------------------
3643// Exported routines
3644//-------------------------------------------------------------------
3645
3646// CAVEAT: The code structure below ensures that MallocHook methods are always
3647// called from the stack frame of the invoked allocation function.
3648// heap-checker.cc depends on this to start a stack trace from
3649// the call to the (de)allocation function.
3650
3651#ifndef WTF_CHANGES
3652extern "C"
9dae56ea
A
3653#else
3654#define do_malloc do_malloc<crashOnFailure>
3655
3656template <bool crashOnFailure>
3657void* malloc(size_t);
3658
3659void* fastMalloc(size_t size)
3660{
3661 return malloc<true>(size);
3662}
3663
f9bf01c6 3664TryMallocReturnValue tryFastMalloc(size_t size)
9dae56ea
A
3665{
3666 return malloc<false>(size);
3667}
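// Illustrative call sites for the two entry points above (sketch only, not
// compiled here). fastMalloc() crashes via CRASH() instead of returning 0;
// tryFastMalloc() reports failure through TryMallocReturnValue, consumed
// with the getValue() accessor declared in FastMalloc.h.
#if 0
static bool tryAllocateBuffer(void*& out, size_t bytes)
{
    void* candidate;
    if (!tryFastMalloc(bytes).getValue(candidate))
        return false;               // out of memory; the caller decides what to do
    out = candidate;
    return true;
}

static void* mustHaveBuffer(size_t bytes)
{
    return fastMalloc(bytes);       // never returns 0; crashes on failure
}
#endif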
3668
3669template <bool crashOnFailure>
3670ALWAYS_INLINE
b37bf2e1
A
3671#endif
3672void* malloc(size_t size) {
ba379fdc
A
3673#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
3674 if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= size) // If overflow would occur...
3675 return 0;
3676 size += sizeof(AllocAlignmentInteger);
3677 void* result = do_malloc(size);
3678 if (!result)
3679 return 0;
3680
3681 *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
3682 result = static_cast<AllocAlignmentInteger*>(result) + 1;
3683#else
3684 void* result = do_malloc(size);
3685#endif
3686
b37bf2e1
A
3687#ifndef WTF_CHANGES
3688 MallocHook::InvokeNewHook(result, size);
3689#endif
3690 return result;
3691}
3692
3693#ifndef WTF_CHANGES
3694extern "C"
3695#endif
3696void free(void* ptr) {
3697#ifndef WTF_CHANGES
3698 MallocHook::InvokeDeleteHook(ptr);
3699#endif
ba379fdc
A
3700
3701#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
3702 if (!ptr)
3703 return;
3704
3705 AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(ptr);
3706 if (*header != Internal::AllocTypeMalloc)
3707 Internal::fastMallocMatchFailed(ptr);
3708 do_free(header);
3709#else
3710 do_free(ptr);
3711#endif
b37bf2e1
A
3712}
3713
3714#ifndef WTF_CHANGES
3715extern "C"
9dae56ea
A
3716#else
3717template <bool crashOnFailure>
3718void* calloc(size_t, size_t);
3719
3720void* fastCalloc(size_t n, size_t elem_size)
3721{
3722 return calloc<true>(n, elem_size);
3723}
3724
f9bf01c6 3725TryMallocReturnValue tryFastCalloc(size_t n, size_t elem_size)
9dae56ea
A
3726{
3727 return calloc<false>(n, elem_size);
3728}
3729
3730template <bool crashOnFailure>
3731ALWAYS_INLINE
b37bf2e1
A
3732#endif
3733void* calloc(size_t n, size_t elem_size) {
ba379fdc 3734 size_t totalBytes = n * elem_size;
b37bf2e1
A
3735
3736 // Protect against overflow
3737 if (n > 1 && elem_size && (totalBytes / elem_size) != n)
3738 return 0;
ba379fdc
A
3739
3740#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
3741 if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes) // If overflow would occur...
3742 return 0;
3743
3744 totalBytes += sizeof(AllocAlignmentInteger);
3745 void* result = do_malloc(totalBytes);
3746 if (!result)
3747 return 0;
3748
b37bf2e1 3749 memset(result, 0, totalBytes);
ba379fdc
A
3750 *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
3751 result = static_cast<AllocAlignmentInteger*>(result) + 1;
3752#else
3753 void* result = do_malloc(totalBytes);
3754 if (result != NULL) {
3755 memset(result, 0, totalBytes);
3756 }
3757#endif
3758
b37bf2e1
A
3759#ifndef WTF_CHANGES
3760 MallocHook::InvokeNewHook(result, totalBytes);
3761#endif
3762 return result;
3763}
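// Why the division check above catches overflow (illustrative, 32-bit size_t
// for readability): with n == 0x10000 and elem_size == 0x10001, n * elem_size
// wraps around to 0x10000, and 0x10000 / 0x10001 == 0 != n, so the call
// correctly returns 0 instead of handing back an undersized block.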
3764
9dae56ea
A
3765// Since cfree isn't used anywhere, we don't compile it in.
3766#ifndef WTF_CHANGES
b37bf2e1
A
3767#ifndef WTF_CHANGES
3768extern "C"
3769#endif
3770void cfree(void* ptr) {
3771#ifndef WTF_CHANGES
3772 MallocHook::InvokeDeleteHook(ptr);
3773#endif
3774 do_free(ptr);
3775}
9dae56ea 3776#endif
b37bf2e1
A
3777
3778#ifndef WTF_CHANGES
3779extern "C"
9dae56ea
A
3780#else
3781template <bool crashOnFailure>
3782void* realloc(void*, size_t);
3783
3784void* fastRealloc(void* old_ptr, size_t new_size)
3785{
3786 return realloc<true>(old_ptr, new_size);
3787}
3788
f9bf01c6 3789TryMallocReturnValue tryFastRealloc(void* old_ptr, size_t new_size)
9dae56ea
A
3790{
3791 return realloc<false>(old_ptr, new_size);
3792}
3793
3794template <bool crashOnFailure>
3795ALWAYS_INLINE
b37bf2e1
A
3796#endif
3797void* realloc(void* old_ptr, size_t new_size) {
3798 if (old_ptr == NULL) {
ba379fdc
A
3799#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
3800 void* result = malloc(new_size);
3801#else
b37bf2e1
A
3802 void* result = do_malloc(new_size);
3803#ifndef WTF_CHANGES
3804 MallocHook::InvokeNewHook(result, new_size);
ba379fdc 3805#endif
b37bf2e1
A
3806#endif
3807 return result;
3808 }
3809 if (new_size == 0) {
3810#ifndef WTF_CHANGES
3811 MallocHook::InvokeDeleteHook(old_ptr);
3812#endif
3813 free(old_ptr);
3814 return NULL;
3815 }
3816
ba379fdc
A
3817#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
3818 if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= new_size) // If overflow would occur...
3819 return 0;
3820 new_size += sizeof(AllocAlignmentInteger);
3821 AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(old_ptr);
3822 if (*header != Internal::AllocTypeMalloc)
3823 Internal::fastMallocMatchFailed(old_ptr);
3824 old_ptr = header;
3825#endif
3826
b37bf2e1
A
3827 // Get the size of the old entry
3828 const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
3829 size_t cl = pageheap->GetSizeClassIfCached(p);
3830 Span *span = NULL;
3831 size_t old_size;
3832 if (cl == 0) {
3833 span = pageheap->GetDescriptor(p);
3834 cl = span->sizeclass;
3835 pageheap->CacheSizeClass(p, cl);
3836 }
3837 if (cl != 0) {
3838 old_size = ByteSizeForClass(cl);
3839 } else {
3840 ASSERT(span != NULL);
3841 old_size = span->length << kPageShift;
3842 }
3843
3844 // Reallocate if the new size is larger than the old size,
3845 // or if the new size is significantly smaller than the old size.
3846 if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
3847 // Need to reallocate
3848 void* new_ptr = do_malloc(new_size);
3849 if (new_ptr == NULL) {
3850 return NULL;
3851 }
3852#ifndef WTF_CHANGES
3853 MallocHook::InvokeNewHook(new_ptr, new_size);
3854#endif
3855 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
3856#ifndef WTF_CHANGES
3857 MallocHook::InvokeDeleteHook(old_ptr);
3858#endif
3859 // We could use a variant of do_free() that leverages the fact
3860 // that we already know the sizeclass of old_ptr. The benefit
3861 // would be small, so don't bother.
3862 do_free(old_ptr);
ba379fdc
A
3863#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
3864 new_ptr = static_cast<AllocAlignmentInteger*>(new_ptr) + 1;
3865#endif
b37bf2e1
A
3866 return new_ptr;
3867 } else {
ba379fdc 3868#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
f9bf01c6 3869 old_ptr = static_cast<AllocAlignmentInteger*>(old_ptr) + 1; // Set old_ptr back to the user pointer.
ba379fdc 3870#endif
b37bf2e1
A
3871 return old_ptr;
3872 }
3873}
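// Illustrative behaviour of the resize policy above (the class sizes are
// assumptions, not the exact tables): shrinking a 4096-byte block to 4000
// bytes keeps the old block, because AllocationSize(4000) would still be
// 4096; shrinking it to 128 bytes reallocates, because a 128-byte class is
// significantly smaller than 4096; and any growth beyond 4096 always
// reallocates.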
3874
9dae56ea
A
3875#ifdef WTF_CHANGES
3876#undef do_malloc
3877#else
b37bf2e1
A
3878
3879static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;
3880
3881static inline void* cpp_alloc(size_t size, bool nothrow) {
3882 for (;;) {
3883 void* p = do_malloc(size);
3884#ifdef PREANSINEW
3885 return p;
3886#else
3887 if (p == NULL) { // allocation failed
3888 // Get the current new handler. NB: this function is not
3889 // thread-safe. We make a feeble stab at making it so here, but
3890 // this lock only protects against tcmalloc interfering with
3891 // itself, not with other libraries calling set_new_handler.
3892 std::new_handler nh;
3893 {
3894 SpinLockHolder h(&set_new_handler_lock);
3895 nh = std::set_new_handler(0);
3896 (void) std::set_new_handler(nh);
3897 }
3898 // If no new_handler is established, the allocation failed.
3899 if (!nh) {
3900 if (nothrow) return 0;
3901 throw std::bad_alloc();
3902 }
3903 // Otherwise, try the new_handler. If it returns, retry the
3904 // allocation. If it throws std::bad_alloc, fail the allocation.
3905      // If it throws something else, don't interfere.
3906 try {
3907 (*nh)();
3908 } catch (const std::bad_alloc&) {
3909 if (!nothrow) throw;
3910 return p;
3911 }
3912 } else { // allocation success
3913 return p;
3914 }
3915#endif
3916 }
3917}
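// Sketch of the protocol cpp_alloc cooperates with: a client may install a
// std::new_handler that either frees some memory and returns (cpp_alloc then
// retries the allocation) or throws std::bad_alloc (the allocation fails).
// The handler below is hypothetical and uses the stock tcmalloc
// MallocExtension interface; not compiled here.
#if 0
#include <new>
#include <google/malloc_extension.h>

static void ReleaseCachesOrGiveUp() {
  static bool releasedOnce = false;
  if (!releasedOnce) {
    releasedOnce = true;
    MallocExtension::instance()->ReleaseFreeMemory();  // then cpp_alloc retries
    return;
  }
  throw std::bad_alloc();                              // nothing left to release
}

// Typically installed once near startup:
//   std::set_new_handler(ReleaseCachesOrGiveUp);
#endif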
3918
3919void* operator new(size_t size) {
3920 void* p = cpp_alloc(size, false);
3921 // We keep this next instruction out of cpp_alloc for a reason: when
3922 // it's in, and new just calls cpp_alloc, the optimizer may fold the
3923 // new call into cpp_alloc, which messes up our whole section-based
3924 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
3925 // isn't the last thing this fn calls, and prevents the folding.
3926 MallocHook::InvokeNewHook(p, size);
3927 return p;
3928}
3929
3930void* operator new(size_t size, const std::nothrow_t&) __THROW {
3931 void* p = cpp_alloc(size, true);
3932 MallocHook::InvokeNewHook(p, size);
3933 return p;
3934}
3935
3936void operator delete(void* p) __THROW {
3937 MallocHook::InvokeDeleteHook(p);
3938 do_free(p);
3939}
3940
3941void operator delete(void* p, const std::nothrow_t&) __THROW {
3942 MallocHook::InvokeDeleteHook(p);
3943 do_free(p);
3944}
3945
3946void* operator new[](size_t size) {
3947 void* p = cpp_alloc(size, false);
3948 // We keep this next instruction out of cpp_alloc for a reason: when
3949 // it's in, and new just calls cpp_alloc, the optimizer may fold the
3950 // new call into cpp_alloc, which messes up our whole section-based
3951 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
3952 // isn't the last thing this fn calls, and prevents the folding.
3953 MallocHook::InvokeNewHook(p, size);
3954 return p;
3955}
3956
3957void* operator new[](size_t size, const std::nothrow_t&) __THROW {
3958 void* p = cpp_alloc(size, true);
3959 MallocHook::InvokeNewHook(p, size);
3960 return p;
3961}
3962
3963void operator delete[](void* p) __THROW {
3964 MallocHook::InvokeDeleteHook(p);
3965 do_free(p);
3966}
3967
3968void operator delete[](void* p, const std::nothrow_t&) __THROW {
3969 MallocHook::InvokeDeleteHook(p);
3970 do_free(p);
3971}
3972
3973extern "C" void* memalign(size_t align, size_t size) __THROW {
3974 void* result = do_memalign(align, size);
3975 MallocHook::InvokeNewHook(result, size);
3976 return result;
3977}
3978
3979extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
3980 __THROW {
3981 if (((align % sizeof(void*)) != 0) ||
3982 ((align & (align - 1)) != 0) ||
3983 (align == 0)) {
3984 return EINVAL;
3985 }
3986
3987 void* result = do_memalign(align, size);
3988 MallocHook::InvokeNewHook(result, size);
3989 if (result == NULL) {
3990 return ENOMEM;
3991 } else {
3992 *result_ptr = result;
3993 return 0;
3994 }
3995}
3996
3997static size_t pagesize = 0;
3998
3999extern "C" void* valloc(size_t size) __THROW {
4000 // Allocate page-aligned object of length >= size bytes
4001 if (pagesize == 0) pagesize = getpagesize();
4002 void* result = do_memalign(pagesize, size);
4003 MallocHook::InvokeNewHook(result, size);
4004 return result;
4005}
4006
4007extern "C" void* pvalloc(size_t size) __THROW {
4008 // Round up size to a multiple of pagesize
4009 if (pagesize == 0) pagesize = getpagesize();
4010 size = (size + pagesize - 1) & ~(pagesize - 1);
4011 void* result = do_memalign(pagesize, size);
4012 MallocHook::InvokeNewHook(result, size);
4013 return result;
4014}
4015
4016extern "C" void malloc_stats(void) {
4017 do_malloc_stats();
4018}
4019
4020extern "C" int mallopt(int cmd, int value) {
4021 return do_mallopt(cmd, value);
4022}
4023
4024#ifdef HAVE_STRUCT_MALLINFO
4025extern "C" struct mallinfo mallinfo(void) {
4026 return do_mallinfo();
4027}
4028#endif
4029
4030//-------------------------------------------------------------------
4031// Some library routines on RedHat 9 allocate memory using malloc()
4032// and free it using __libc_free() (or vice-versa). Since we provide
4033// our own implementations of malloc/free, we need to make sure that
4034// the __libc_XXX variants (defined as part of glibc) also point to
4035// the same implementations.
4036//-------------------------------------------------------------------
4037
4038#if defined(__GLIBC__)
4039extern "C" {
ba379fdc 4040#if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
b37bf2e1
A
4041 // Potentially faster variants that use the gcc alias extension.
4042 // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
4043# define ALIAS(x) __attribute__ ((weak, alias (x)))
4044 void* __libc_malloc(size_t size) ALIAS("malloc");
4045 void __libc_free(void* ptr) ALIAS("free");
4046 void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc");
4047 void* __libc_calloc(size_t n, size_t size) ALIAS("calloc");
4048 void __libc_cfree(void* ptr) ALIAS("cfree");
4049 void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
4050 void* __libc_valloc(size_t size) ALIAS("valloc");
4051 void* __libc_pvalloc(size_t size) ALIAS("pvalloc");
4052 int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
4053# undef ALIAS
4054# else /* not __GNUC__ */
4055 // Portable wrappers
4056 void* __libc_malloc(size_t size) { return malloc(size); }
4057 void __libc_free(void* ptr) { free(ptr); }
4058 void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
4059 void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
4060 void __libc_cfree(void* ptr) { cfree(ptr); }
4061 void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
4062 void* __libc_valloc(size_t size) { return valloc(size); }
4063 void* __libc_pvalloc(size_t size) { return pvalloc(size); }
4064 int __posix_memalign(void** r, size_t a, size_t s) {
4065 return posix_memalign(r, a, s);
4066 }
4067# endif /* __GNUC__ */
4068}
4069#endif /* __GLIBC__ */
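// The ALIAS() trick above is GCC's mechanism for making two symbols share a
// single definition; "weak" lets any strong definition elsewhere win at link
// time. A minimal standalone illustration with hypothetical names:
#if 0
extern "C" void* my_malloc(size_t size) { /* ... */ return 0; }
extern "C" void* __libc_malloc(size_t size) __attribute__((weak, alias("my_malloc")));
#endif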
4070
4071// Override __libc_memalign in libc on linux boxes specially.
4072// They have a bug in libc that causes them to (very rarely) allocate
4073// with __libc_memalign() yet deallocate with free() and the
4074// definitions above don't catch it.
4075// This function is an exception to the rule of calling MallocHook method
4076// from the stack frame of the allocation function;
4077// heap-checker handles this special case explicitly.
4078static void *MemalignOverride(size_t align, size_t size, const void *caller)
4079 __THROW {
4080 void* result = do_memalign(align, size);
4081 MallocHook::InvokeNewHook(result, size);
4082 return result;
4083}
4084void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
4085
4086#endif
4087
f9bf01c6 4088#if defined(WTF_CHANGES) && OS(DARWIN)
b37bf2e1
A
4089
4090class FreeObjectFinder {
4091 const RemoteMemoryReader& m_reader;
4092 HashSet<void*> m_freeObjects;
4093
4094public:
4095 FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }
4096
4097 void visit(void* ptr) { m_freeObjects.add(ptr); }
4098 bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
ba379fdc 4099 bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
b37bf2e1
A
4100 size_t freeObjectCount() const { return m_freeObjects.size(); }
4101
4102 void findFreeObjects(TCMalloc_ThreadCache* threadCache)
4103 {
4104 for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
4105 threadCache->enumerateFreeObjects(*this, m_reader);
4106 }
4107
9dae56ea 4108 void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
b37bf2e1
A
4109 {
4110 for (unsigned i = 0; i < numSizes; i++)
9dae56ea 4111 centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
b37bf2e1
A
4112 }
4113};
4114
4115class PageMapFreeObjectFinder {
4116 const RemoteMemoryReader& m_reader;
4117 FreeObjectFinder& m_freeObjectFinder;
4118
4119public:
4120 PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
4121 : m_reader(reader)
4122 , m_freeObjectFinder(freeObjectFinder)
4123 { }
4124
4125 int visit(void* ptr) const
4126 {
4127 if (!ptr)
4128 return 1;
4129
4130 Span* span = m_reader(reinterpret_cast<Span*>(ptr));
4131 if (span->free) {
4132 void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
4133 m_freeObjectFinder.visit(ptr);
4134 } else if (span->sizeclass) {
4135 // Walk the free list of the small-object span, keeping track of each object seen
4136 for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject)))
4137 m_freeObjectFinder.visit(nextObject);
4138 }
4139 return span->length;
4140 }
4141};
4142
4143class PageMapMemoryUsageRecorder {
4144 task_t m_task;
4145 void* m_context;
4146 unsigned m_typeMask;
4147 vm_range_recorder_t* m_recorder;
4148 const RemoteMemoryReader& m_reader;
4149 const FreeObjectFinder& m_freeObjectFinder;
ba379fdc
A
4150
4151 HashSet<void*> m_seenPointers;
4152 Vector<Span*> m_coalescedSpans;
b37bf2e1
A
4153
4154public:
4155 PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
4156 : m_task(task)
4157 , m_context(context)
4158 , m_typeMask(typeMask)
4159 , m_recorder(recorder)
4160 , m_reader(reader)
4161 , m_freeObjectFinder(freeObjectFinder)
4162 { }
4163
ba379fdc
A
4164 ~PageMapMemoryUsageRecorder()
4165 {
4166 ASSERT(!m_coalescedSpans.size());
4167 }
4168
4169 void recordPendingRegions()
4170 {
4171 Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
4172 vm_range_t ptrRange = { m_coalescedSpans[0]->start << kPageShift, 0 };
4173 ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize);
4174
4175 // Mark the memory region the spans represent as a candidate for containing pointers
4176 if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE)
4177 (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);
4178
4179 if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
4180 m_coalescedSpans.clear();
4181 return;
4182 }
4183
4184 Vector<vm_range_t, 1024> allocatedPointers;
4185 for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
4186 Span *theSpan = m_coalescedSpans[i];
4187 if (theSpan->free)
4188 continue;
4189
4190 vm_address_t spanStartAddress = theSpan->start << kPageShift;
4191 vm_size_t spanSizeInBytes = theSpan->length * kPageSize;
4192
4193 if (!theSpan->sizeclass) {
4194 // If it's an allocated large object span, mark it as in use
4195 if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
4196 allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
4197 } else {
4198 const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);
4199
4200 // Mark each allocated small object within the span as in use
4201 const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
4202 for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
4203 if (!m_freeObjectFinder.isFreeObject(object))
4204 allocatedPointers.append((vm_range_t){object, objectSize});
4205 }
4206 }
4207 }
4208
4209 (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());
4210
4211 m_coalescedSpans.clear();
4212 }
4213
4214 int visit(void* ptr)
b37bf2e1
A
4215 {
4216 if (!ptr)
4217 return 1;
4218
4219 Span* span = m_reader(reinterpret_cast<Span*>(ptr));
ba379fdc
A
4220 if (!span->start)
4221 return 1;
4222
b37bf2e1
A
4223 if (m_seenPointers.contains(ptr))
4224 return span->length;
4225 m_seenPointers.add(ptr);
4226
ba379fdc
A
4227 if (!m_coalescedSpans.size()) {
4228 m_coalescedSpans.append(span);
4229 return span->length;
4230 }
b37bf2e1 4231
ba379fdc
A
4232 Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
4233 vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
4234 vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;
b37bf2e1 4235
ba379fdc
A
4236 // If the new span is adjacent to the previous span, do nothing for now.
4237 vm_address_t spanStartAddress = span->start << kPageShift;
4238 if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
4239 m_coalescedSpans.append(span);
4240 return span->length;
4241 }
b37bf2e1 4242
ba379fdc
A
4243 // New span is not adjacent to previous span, so record the spans coalesced so far.
4244 recordPendingRegions();
4245 m_coalescedSpans.append(span);
b37bf2e1 4246
ba379fdc
A
4247 return span->length;
4248 }
4249};
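// Coalescing example for the recorder above (4 KB pages assumed): spans
// covering pages [0x100, 0x103] and [0x104, 0x105] are adjacent, so they are
// reported as one MALLOC_PTR_REGION_RANGE_TYPE range of six pages; a span
// starting at page 0x200 breaks the run, flushing the pending region via
// recordPendingRegions() before a new run is started.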
b37bf2e1 4250
ba379fdc
A
4251class AdminRegionRecorder {
4252 task_t m_task;
4253 void* m_context;
4254 unsigned m_typeMask;
4255 vm_range_recorder_t* m_recorder;
4256 const RemoteMemoryReader& m_reader;
4257
4258 Vector<vm_range_t, 1024> m_pendingRegions;
4259
4260public:
4261 AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader)
4262 : m_task(task)
4263 , m_context(context)
4264 , m_typeMask(typeMask)
4265 , m_recorder(recorder)
4266 , m_reader(reader)
4267 { }
4268
4269 void recordRegion(vm_address_t ptr, size_t size)
4270 {
4271 if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
4272 m_pendingRegions.append((vm_range_t){ ptr, size });
4273 }
4274
4275 void visit(void *ptr, size_t size)
4276 {
4277 recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
4278 }
4279
4280 void recordPendingRegions()
4281 {
4282 if (m_pendingRegions.size()) {
4283 (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
4284 m_pendingRegions.clear();
b37bf2e1 4285 }
ba379fdc 4286 }
b37bf2e1 4287
ba379fdc
A
4288 ~AdminRegionRecorder()
4289 {
4290 ASSERT(!m_pendingRegions.size());
b37bf2e1
A
4291 }
4292};
4293
4294kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
4295{
4296 RemoteMemoryReader memoryReader(task, reader);
4297
4298 InitSizeClasses();
4299
4300 FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
4301 TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
4302 TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
4303 TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);
4304
4305 TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);
4306
4307 FreeObjectFinder finder(memoryReader);
4308 finder.findFreeObjects(threadHeaps);
9dae56ea 4309 finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);
b37bf2e1
A
4310
4311 TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
4312 PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
ba379fdc 4313 pageMap->visitValues(pageMapFinder, memoryReader);
b37bf2e1
A
4314
4315 PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
ba379fdc
A
4316 pageMap->visitValues(usageRecorder, memoryReader);
4317 usageRecorder.recordPendingRegions();
4318
4319 AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder, memoryReader);
4320 pageMap->visitAllocations(adminRegionRecorder, memoryReader);
4321
4322 PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
4323 PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);
4324
4325 spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
4326 pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
4327
4328 adminRegionRecorder.recordPendingRegions();
b37bf2e1
A
4329
4330 return 0;
4331}
4332
4333size_t FastMallocZone::size(malloc_zone_t*, const void*)
4334{
4335 return 0;
4336}
4337
4338void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
4339{
4340 return 0;
4341}
4342
4343void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
4344{
4345 return 0;
4346}
4347
4348void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
4349{
4350 // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
4351 // is not in this zone. When this happens, the pointer being freed was not allocated by any
4352 // zone so we need to print a useful error for the application developer.
4353 malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
4354}
4355
4356void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
4357{
4358 return 0;
4359}
4360
4361
4362#undef malloc
4363#undef free
4364#undef realloc
4365#undef calloc
4366
4367extern "C" {
4368malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
9bcd318d 4369 &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
ba379fdc 4370
f9bf01c6 4371#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) || PLATFORM(IPHONE)
ba379fdc
A
4372 , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
4373#endif
4374
9bcd318d 4375 };
b37bf2e1
A
4376}
4377
ba379fdc 4378FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
b37bf2e1
A
4379 : m_pageHeap(pageHeap)
4380 , m_threadHeaps(threadHeaps)
4381 , m_centralCaches(centralCaches)
ba379fdc
A
4382 , m_spanAllocator(spanAllocator)
4383 , m_pageHeapAllocator(pageHeapAllocator)
b37bf2e1
A
4384{
4385 memset(&m_zone, 0, sizeof(m_zone));
9bcd318d 4386 m_zone.version = 4;
b37bf2e1
A
4387 m_zone.zone_name = "JavaScriptCore FastMalloc";
4388 m_zone.size = &FastMallocZone::size;
4389 m_zone.malloc = &FastMallocZone::zoneMalloc;
4390 m_zone.calloc = &FastMallocZone::zoneCalloc;
4391 m_zone.realloc = &FastMallocZone::zoneRealloc;
4392 m_zone.free = &FastMallocZone::zoneFree;
4393 m_zone.valloc = &FastMallocZone::zoneValloc;
4394 m_zone.destroy = &FastMallocZone::zoneDestroy;
4395 m_zone.introspect = &jscore_fastmalloc_introspection;
4396 malloc_zone_register(&m_zone);
4397}
4398
4399
4400void FastMallocZone::init()
4401{
ba379fdc 4402 static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
b37bf2e1
A
4403}
4404
f4e78d34
A
4405#endif
4406
9dae56ea 4407#if WTF_CHANGES
b37bf2e1
A
4408void releaseFastMallocFreeMemory()
4409{
b5422865 4410 // Flush free pages in the current thread cache back to the page heap.
9dae56ea
A
4411 // Low watermark mechanism in Scavenge() prevents full return on the first pass.
4412 // The second pass flushes everything.
b5422865
A
4413 if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
4414 threadCache->Scavenge();
4415 threadCache->Scavenge();
4416 }
4417
b37bf2e1
A
4418 SpinLockHolder h(&pageheap_lock);
4419 pageheap->ReleaseFreePages();
4420}
9dae56ea
A
4421
4422FastMallocStatistics fastMallocStatistics()
4423{
4424 FastMallocStatistics statistics;
4425 {
4426 SpinLockHolder lockHolder(&pageheap_lock);
4427 statistics.heapSize = static_cast<size_t>(pageheap->SystemBytes());
4428 statistics.freeSizeInHeap = static_cast<size_t>(pageheap->FreeBytes());
4429 statistics.returnedSize = pageheap->ReturnedBytes();
4430 statistics.freeSizeInCaches = 0;
4431 for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
4432 statistics.freeSizeInCaches += threadCache->Size();
4433 }
4434 for (unsigned cl = 0; cl < kNumClasses; ++cl) {
4435 const int length = central_cache[cl].length();
4436 const int tc_length = central_cache[cl].tc_length();
4437 statistics.freeSizeInCaches += ByteSizeForClass(cl) * (length + tc_length);
4438 }
4439 return statistics;
4440}
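// Illustrative consumer of the statistics above (sketch only, not compiled;
// assumes it lives inside namespace WTF like the functions above and that
// <stdio.h> is available):
#if 0
static void logFastMallocStatistics()
{
    FastMallocStatistics s = fastMallocStatistics();
    fprintf(stderr, "FastMalloc: heap %zu, free in heap %zu, returned to OS %zu, free in caches %zu\n",
            s.heapSize, s.freeSizeInHeap, s.returnedSize, s.freeSizeInCaches);
}
#endif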
b37bf2e1 4441
b37bf2e1
A
4442} // namespace WTF
4443#endif
4444
f4e78d34 4445#endif // FORCE_SYSTEM_MALLOC