// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
// Copyright (C) 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// A malloc that uses a per-thread cache to satisfy small malloc requests.
// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
//
// See doc/tcmalloc.html for a high-level
// description of how this malloc works.
//
// SYNCHRONIZATION
//  1. The thread-specific lists are accessed without acquiring any locks.
//     This is safe because each such list is only accessed by one thread.
//  2. We have a lock per central free-list, and hold it while manipulating
//     the central free list for a particular size.
//  3. The central page allocator is protected by "pageheap_lock".
//  4. The pagemap (which maps from page-number to descriptor),
//     can be read without holding any locks, and written while holding
//     the "pageheap_lock".
//  5. To improve performance, a subset of the information one can get
//     from the pagemap is cached in a data structure, pagemap_cache_,
//     that atomically reads and writes its entries.  This cache can be
//     read and written without locking.
//
//     This multi-threaded access to the pagemap is safe for fairly
//     subtle reasons.  We basically assume that when an object X is
//     allocated by thread A and deallocated by thread B, there must
//     have been appropriate synchronization in the handoff of object
//     X from thread A to thread B.  The same logic applies to pagemap_cache_.
//
// THE PAGEID-TO-SIZECLASS CACHE
// Hot PageID-to-sizeclass mappings are held by pagemap_cache_.  If this cache
// returns 0 for a particular PageID then that means "no information," not that
// the sizeclass is 0.  The cache may have stale information for pages that do
// not hold the beginning of any free()'able object.  Staleness is eliminated
// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
// do_memalign() for all other relevant pages.
//
// TODO: Bias reclamation to larger addresses
// TODO: implement mallinfo/mallopt
// TODO: Better testing
//
// 9/28/2003 (new page-level allocator replaces ptmalloc2):
// * malloc/free of small objects goes from ~300 ns to ~50 ns.
// * allocation of a reasonably complicated struct
//   goes from about 1100 ns to about 300 ns.

#include "config.h"
#include "FastMalloc.h"

#include "Assertions.h"
#include <limits>
#if ENABLE(WTF_MULTIPLE_THREADS)
#include <pthread.h>
#endif
#include <wtf/StdLibExtras.h>

#ifndef NO_TCMALLOC_SAMPLES
#ifdef WTF_CHANGES
#define NO_TCMALLOC_SAMPLES
#endif
#endif

#if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC) && defined(NDEBUG)
#define FORCE_SYSTEM_MALLOC 0
#else
#define FORCE_SYSTEM_MALLOC 1
#endif

// Use a background thread to periodically scavenge memory to release back to the system
#define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 0

#ifndef NDEBUG
namespace WTF {

#if ENABLE(WTF_MULTIPLE_THREADS)
static pthread_key_t isForbiddenKey;
static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
static void initializeIsForbiddenKey()
{
    pthread_key_create(&isForbiddenKey, 0);
}

#if !ASSERT_DISABLED
static bool isForbidden()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    return !!pthread_getspecific(isForbiddenKey);
}
#endif

void fastMallocForbid()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    pthread_setspecific(isForbiddenKey, &isForbiddenKey);
}

void fastMallocAllow()
{
    pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
    pthread_setspecific(isForbiddenKey, 0);
}

#else

static bool staticIsForbidden;
static bool isForbidden()
{
    return staticIsForbidden;
}

void fastMallocForbid()
{
    staticIsForbidden = true;
}

void fastMallocAllow()
{
    staticIsForbidden = false;
}
#endif // ENABLE(WTF_MULTIPLE_THREADS)

} // namespace WTF
#endif // NDEBUG

#include <string.h>

namespace WTF {


namespace Internal {
#if !ENABLE(WTF_MALLOC_VALIDATION)
void fastMallocMatchFailed(void*);
#else
COMPILE_ASSERT(((sizeof(ValidationHeader) % sizeof(AllocAlignmentInteger)) == 0), ValidationHeader_must_produce_correct_alignment);
#endif
void fastMallocMatchFailed(void*)
{
    CRASH();
}

} // namespace Internal


void* fastZeroedMalloc(size_t n)
{
    void* result = fastMalloc(n);
    memset(result, 0, n);
    return result;
}

char* fastStrDup(const char* src)
{
    size_t len = strlen(src) + 1;
    char* dup = static_cast<char*>(fastMalloc(len));
    memcpy(dup, src, len);
    return dup;
}

TryMallocReturnValue tryFastZeroedMalloc(size_t n)
{
    void* result;
    if (!tryFastMalloc(n).getValue(result))
        return 0;
    memset(result, 0, n);
    return result;
}
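
// Editor's illustration (not part of the original file): how a caller is
// expected to consume TryMallocReturnValue -- getValue() must be called, and
// a false result indicates allocation failure. This mirrors the pattern used
// by tryFastZeroedMalloc() above; the helper name is hypothetical.
#if 0
static char* tryAllocateBuffer(size_t size)
{
    void* buffer;
    if (!tryFastMalloc(size).getValue(buffer))
        return 0; // Allocation failed; the caller must handle it.
    return static_cast<char*>(buffer);
}
#endif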

} // namespace WTF

#if FORCE_SYSTEM_MALLOC

#if PLATFORM(BREWMP)
#include "brew/SystemMallocBrew.h"
#endif

#if OS(DARWIN)
#include <malloc/malloc.h>
#elif OS(WINDOWS)
#include <malloc.h>
#endif

namespace WTF {

TryMallocReturnValue tryFastMalloc(size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(WTF_MALLOC_VALIDATION)
    if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n)  // If overflow would occur...
        return 0;

    void* result = malloc(n + Internal::ValidationBufferSize);
    if (!result)
        return 0;
    Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result);
    header->m_size = n;
    header->m_type = Internal::AllocTypeMalloc;
    header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix);
    result = header + 1;
    *Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
    fastMallocValidate(result);
    return result;
#else
    return malloc(n);
#endif
}

void* fastMalloc(size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(WTF_MALLOC_VALIDATION)
    TryMallocReturnValue returnValue = tryFastMalloc(n);
    void* result;
    if (!returnValue.getValue(result))
        CRASH();
#else
    void* result = malloc(n);
#endif

    if (!result) {
#if PLATFORM(BREWMP)
        // The behavior of malloc(0) is implementation defined.
        // To make sure that fastMalloc never returns 0, retry with fastMalloc(1).
        if (!n)
            return fastMalloc(1);
#endif
        CRASH();
    }

    return result;
}

TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size)
{
    ASSERT(!isForbidden());

#if ENABLE(WTF_MALLOC_VALIDATION)
    size_t totalBytes = n_elements * element_size;
    if (n_elements > 1 && element_size && (totalBytes / element_size) != n_elements)
        return 0;

    TryMallocReturnValue returnValue = tryFastMalloc(totalBytes);
    void* result;
    if (!returnValue.getValue(result))
        return 0;
    memset(result, 0, totalBytes);
    fastMallocValidate(result);
    return result;
#else
    return calloc(n_elements, element_size);
#endif
}

void* fastCalloc(size_t n_elements, size_t element_size)
{
    ASSERT(!isForbidden());

#if ENABLE(WTF_MALLOC_VALIDATION)
    TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size);
    void* result;
    if (!returnValue.getValue(result))
        CRASH();
#else
    void* result = calloc(n_elements, element_size);
#endif

    if (!result) {
#if PLATFORM(BREWMP)
        // If either n_elements or element_size is 0, the behavior of calloc is implementation defined.
        // To make sure that fastCalloc never returns 0, retry with fastCalloc(1, 1).
        if (!n_elements || !element_size)
            return fastCalloc(1, 1);
#endif
        CRASH();
    }

    return result;
}

void fastFree(void* p)
{
    ASSERT(!isForbidden());

#if ENABLE(WTF_MALLOC_VALIDATION)
    if (!p)
        return;

    fastMallocMatchValidateFree(p, Internal::AllocTypeMalloc);
    Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(p);
    memset(p, 0xCC, header->m_size);
    free(header);
#else
    free(p);
#endif
}

TryMallocReturnValue tryFastRealloc(void* p, size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(WTF_MALLOC_VALIDATION)
    if (p) {
        if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n)  // If overflow would occur...
            return 0;
        fastMallocValidate(p);
        Internal::ValidationHeader* result = static_cast<Internal::ValidationHeader*>(realloc(Internal::fastMallocValidationHeader(p), n + Internal::ValidationBufferSize));
        if (!result)
            return 0;
        result->m_size = n;
        result = result + 1;
        *fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
        fastMallocValidate(result);
        return result;
    } else {
        return fastMalloc(n);
    }
#else
    return realloc(p, n);
#endif
}

void* fastRealloc(void* p, size_t n)
{
    ASSERT(!isForbidden());

#if ENABLE(WTF_MALLOC_VALIDATION)
    TryMallocReturnValue returnValue = tryFastRealloc(p, n);
    void* result;
    if (!returnValue.getValue(result))
        CRASH();
#else
    void* result = realloc(p, n);
#endif

    if (!result)
        CRASH();
    return result;
}

void releaseFastMallocFreeMemory() { }

FastMallocStatistics fastMallocStatistics()
{
    FastMallocStatistics statistics = { 0, 0, 0 };
    return statistics;
}

size_t fastMallocSize(const void* p)
{
#if ENABLE(WTF_MALLOC_VALIDATION)
    return Internal::fastMallocValidationHeader(const_cast<void*>(p))->m_size;
#elif OS(DARWIN)
    return malloc_size(p);
#elif OS(WINDOWS) && !PLATFORM(BREWMP)
    // Brew MP uses its own memory allocator, so _msize does not work on the Brew MP simulator.
    return _msize(const_cast<void*>(p));
#else
    return 1;
#endif
}

} // namespace WTF

#if OS(DARWIN)
// This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
// It will never be used in this case, so its type and value are less interesting than its presence.
extern "C" const int jscore_fastmalloc_introspection = 0;
#endif

#else // FORCE_SYSTEM_MALLOC

#if HAVE(STDINT_H)
#include <stdint.h>
#elif HAVE(INTTYPES_H)
#include <inttypes.h>
#else
#include <sys/types.h>
#endif

#include "AlwaysInline.h"
#include "Assertions.h"
#include "TCPackedCache.h"
#include "TCPageMap.h"
#include "TCSpinLock.h"
#include "TCSystemAlloc.h"
#include <algorithm>
#include <limits>
#include <pthread.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#if HAVE(ERRNO_H)
#include <errno.h>
#endif
#if OS(UNIX)
#include <unistd.h>
#endif
#if OS(WINDOWS)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#endif

#ifdef WTF_CHANGES

#if OS(DARWIN)
#include "MallocZoneSupport.h"
#include <wtf/HashSet.h>
#include <wtf/Vector.h>
#endif

#if HAVE(HEADER_DETECTION_H)
#include "HeaderDetection.h"
#endif

#if HAVE(DISPATCH_H)
#include <dispatch/dispatch.h>
#endif

#if HAVE(PTHREAD_MACHDEP_H)
#include <System/pthread_machdep.h>

#if defined(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0)
#define WTF_USE_PTHREAD_GETSPECIFIC_DIRECT 1
#endif
#endif

#ifndef PRIuS
#define PRIuS "zu"
#endif

// Calling pthread_getspecific through a global function pointer is faster than a normal
// call to the function on Mac OS X, and it's used in performance-critical code. So we
// use a function pointer. But that's not necessarily faster on other platforms, and we had
// problems with this technique on Windows, so we'll do this only on Mac OS X.
#if OS(DARWIN)
#if !USE(PTHREAD_GETSPECIFIC_DIRECT)
static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
#define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
#else
#define pthread_getspecific(key) _pthread_getspecific_direct(key)
#define pthread_setspecific(key, val) _pthread_setspecific_direct(key, (val))
#endif
#endif

#define DEFINE_VARIABLE(type, name, value, meaning) \
  namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead {  \
  type FLAGS_##name(value);                                \
  char FLAGS_no##name;                                     \
  } \
  using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name

#define DEFINE_int64(name, value, meaning) \
  DEFINE_VARIABLE(int64_t, name, value, meaning)

#define DEFINE_double(name, value, meaning) \
  DEFINE_VARIABLE(double, name, value, meaning)

namespace WTF {

#define malloc fastMalloc
#define calloc fastCalloc
#define free fastFree
#define realloc fastRealloc

#define MESSAGE LOG_ERROR
#define CHECK_CONDITION ASSERT

#if OS(DARWIN)
struct Span;
class TCMalloc_Central_FreeListPadded;
class TCMalloc_PageHeap;
class TCMalloc_ThreadCache;
template <typename T> class PageHeapAllocator;

class FastMallocZone {
public:
    static void init();

    static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
    static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
    static boolean_t check(malloc_zone_t*) { return true; }
    static void print(malloc_zone_t*, boolean_t) { }
    static void log(malloc_zone_t*, void*) { }
    static void forceLock(malloc_zone_t*) { }
    static void forceUnlock(malloc_zone_t*) { }
    static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }

private:
    FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
    static size_t size(malloc_zone_t*, const void*);
    static void* zoneMalloc(malloc_zone_t*, size_t);
    static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
    static void zoneFree(malloc_zone_t*, void*);
    static void* zoneRealloc(malloc_zone_t*, void*, size_t);
    static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
    static void zoneDestroy(malloc_zone_t*) { }

    malloc_zone_t m_zone;
    TCMalloc_PageHeap* m_pageHeap;
    TCMalloc_ThreadCache** m_threadHeaps;
    TCMalloc_Central_FreeListPadded* m_centralCaches;
    PageHeapAllocator<Span>* m_spanAllocator;
    PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
};

#endif

#endif

#ifndef WTF_CHANGES
// This #ifdef should almost never be set.  Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip)  (0)
#else
# include <google/stacktrace.h>
#endif
#endif

// Even if we have support for thread-local storage in the compiler
// and linker, the OS may not support it.  We need to check that at
// runtime.  Right now, we have to keep a manual set of "bad" OSes.
#if defined(HAVE_TLS)
  static bool kernel_supports_tls = false;      // be conservative
  static inline bool KernelSupportsTLS() {
    return kernel_supports_tls;
  }
# if !HAVE_DECL_UNAME    // if too old for uname, probably too old for TLS
    static void CheckIfKernelSupportsTLS() {
      kernel_supports_tls = false;
    }
# else
#   include <sys/utsname.h>    // DECL_UNAME checked for <sys/utsname.h> too
    static void CheckIfKernelSupportsTLS() {
      struct utsname buf;
      if (uname(&buf) != 0) {   // should be impossible
        MESSAGE("uname failed, assuming no TLS support (errno=%d)\n", errno);
        kernel_supports_tls = false;
      } else if (strcasecmp(buf.sysname, "linux") == 0) {
        // The linux case: the first kernel to support TLS was 2.6.0
        if (buf.release[0] < '2' && buf.release[1] == '.')    // 0.x or 1.x
          kernel_supports_tls = false;
        else if (buf.release[0] == '2' && buf.release[1] == '.' &&
                 buf.release[2] >= '0' && buf.release[2] < '6' &&
                 buf.release[3] == '.')                       // 2.0 - 2.5
          kernel_supports_tls = false;
        else
          kernel_supports_tls = true;
      } else {        // some other kernel, we'll be optimistic
        kernel_supports_tls = true;
      }
      // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
    }
# endif  // HAVE_DECL_UNAME
#endif  // HAVE_TLS

// __THROW is defined in glibc systems.  It means, counter-intuitively,
// "This function will never throw an exception."  It's an optional
// optimization tool, but we may need to use it to match glibc prototypes.
#ifndef __THROW    // I guess we're not on a glibc system
# define __THROW   // __THROW is just an optimization, so ok to make it ""
#endif

//-------------------------------------------------------------------
// Configuration
//-------------------------------------------------------------------

// Not all possible combinations of the following parameters make
// sense.  In particular, if kMaxSize increases, you may have to
// increase kNumClasses as well.
static const size_t kPageShift  = 12;
static const size_t kPageSize   = 1 << kPageShift;
static const size_t kMaxSize    = 8u * kPageSize;
static const size_t kAlignShift = 3;
static const size_t kAlignment  = 1 << kAlignShift;
static const size_t kNumClasses = 68;

// Allocates a big block of memory for the pagemap once we reach more than
// 128MB
static const size_t kPageMapBigAllocationThreshold = 128 << 20;

// Minimum number of pages to fetch from system at a time.  Must be
// significantly bigger than kPageSize to amortize system-call
// overhead, and also to reduce external fragmentation.  Also, we
// should keep this value big because various incarnations of Linux
// have small limits on the number of mmap() regions per
// address-space.
static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);

// Number of objects to move between a per-thread list and a central
// list in one shot.  We want this to be not too small so we can
// amortize the lock overhead for accessing the central list.  Making
// it too big may temporarily cause unnecessary memory wastage in the
// per-thread free list until the scavenger cleans up the list.
static int num_objects_to_move[kNumClasses];

// Maximum length we allow a per-thread free-list to have before we
// move objects from it into the corresponding central free-list.  We
// want this big to avoid locking the central free-list too often.  It
// should not hurt to make this list somewhat big because the
// scavenging code will shrink it down when its contents are not in use.
static const int kMaxFreeListLength = 256;

// Lower and upper bounds on the per-thread cache sizes
static const size_t kMinThreadCacheSize = kMaxSize * 2;
static const size_t kMaxThreadCacheSize = 512 * 1024;

// Default bound on the total amount of thread caches
static const size_t kDefaultOverallThreadCacheSize = 16 << 20;

// For all span-lengths < kMaxPages we keep an exact-size list.
// REQUIRED: kMaxPages >= kMinSystemAlloc;
static const size_t kMaxPages = kMinSystemAlloc;

/* The smallest prime > 2^n */
static int primes_list[] = {
    // Small values might cause high rates of sampling
    // and hence commented out.
    // 2, 5, 11, 17, 37, 67, 131, 257,
    // 521, 1031, 2053, 4099, 8209, 16411,
    32771, 65537, 131101, 262147, 524309, 1048583,
    2097169, 4194319, 8388617, 16777259, 33554467 };

// Twice the approximate gap between sampling actions.
// I.e., we take one sample approximately once every
//      tcmalloc_sample_parameter/2
// bytes of allocation, i.e., ~ once every 128KB.
// Must be a prime number.
#ifdef NO_TCMALLOC_SAMPLES
DEFINE_int64(tcmalloc_sample_parameter, 0,
             "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
static size_t sample_period = 0;
#else
DEFINE_int64(tcmalloc_sample_parameter, 262147,
             "Twice the approximate gap between sampling actions."
             " Must be a prime number. Otherwise will be rounded up to a "
             " larger prime number");
static size_t sample_period = 262147;
#endif

// Protects sample_period above
static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;

// Parameters for controlling how fast memory is returned to the OS.

DEFINE_double(tcmalloc_release_rate, 1,
              "Rate at which we release unused memory to the system.  "
              "Zero means we never release memory back to the system.  "
              "Increase this flag to return memory faster; decrease it "
              "to return memory slower.  Reasonable rates are in the "
              "range [0,10]");

//-------------------------------------------------------------------
// Mapping from size to size_class and vice versa
//-------------------------------------------------------------------

// Sizes <= 1024 have an alignment >= 8.  So for such sizes we have an
// array indexed by ceil(size/8).  Sizes > 1024 have an alignment >= 128.
// So for these larger sizes we have an array indexed by ceil(size/128).
//
// We flatten both logical arrays into one physical array and use
// arithmetic to compute an appropriate index.  The constants used by
// ClassIndex() were selected to make the flattening work.
//
// Examples:
//   Size       Expression                      Index
//   -------------------------------------------------------
//   0          (0 + 7) / 8                     0
//   1          (1 + 7) / 8                     1
//   ...
//   1024       (1024 + 7) / 8                  128
//   1025       (1025 + 127 + (120<<7)) / 128   129
//   ...
//   32768      (32768 + 127 + (120<<7)) / 128  376
static const size_t kMaxSmallSize = 1024;
static const int shift_amount[2] = { 3, 7 };  // For divides by 8 or 128
static const int add_amount[2] = { 7, 127 + (120 << 7) };
static unsigned char class_array[377];

// Compute index of the class_array[] entry for a given size
static inline int ClassIndex(size_t s) {
  const int i = (s > kMaxSmallSize);
  return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
}

// Mapping from size class to max size storable in that class
static size_t class_to_size[kNumClasses];

// Mapping from size class to number of pages to allocate at a time
static size_t class_to_pages[kNumClasses];

// TransferCache is used to cache transfers of num_objects_to_move[size_class]
// back and forth between thread caches and the central cache for a given size
// class.
struct TCEntry {
  void *head;  // Head of chain of objects.
  void *tail;  // Tail of chain of objects.
};
// A central cache freelist can have anywhere from 0 to kNumTransferEntries
// slots to put link list chains into.  To keep memory usage bounded the total
// number of TCEntries across size classes is fixed.  Currently each size
// class is initially given one TCEntry which also means that the maximum any
// one class can have is kNumClasses.
static const int kNumTransferEntries = kNumClasses;

// Note: the following only works for "n"s that fit in 32-bits, but
// that is fine since we only use it for small sizes.
static inline int LgFloor(size_t n) {
  int log = 0;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    size_t x = n >> shift;
    if (x != 0) {
      n = x;
      log += shift;
    }
  }
  ASSERT(n == 1);
  return log;
}
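
// Editor's illustration (not in the original source): LgFloor computes
// floor(log2(n)) by peeling off shifts of 16, 8, 4, 2 and 1 bits. A few
// spot checks of that behavior, kept out of the build:
#if 0
static void CheckLgFloorExamples() {
  ASSERT(LgFloor(1) == 0);
  ASSERT(LgFloor(100) == 6);    // 2^6 = 64 <= 100 < 128 = 2^7
  ASSERT(LgFloor(4096) == 12);  // 4096 == 2^12 exactly
}
#endif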

// Some very basic linked list functions for dealing with using void * as
// storage.

static inline void *SLL_Next(void *t) {
  return *(reinterpret_cast<void**>(t));
}

static inline void SLL_SetNext(void *t, void *n) {
  *(reinterpret_cast<void**>(t)) = n;
}

static inline void SLL_Push(void **list, void *element) {
  SLL_SetNext(element, *list);
  *list = element;
}

static inline void *SLL_Pop(void **list) {
  void *result = *list;
  *list = SLL_Next(*list);
  return result;
}


// Remove N elements from a linked list to which head points.  head will be
// modified to point to the new head.  start and end will point to the first
// and last nodes of the range.  Note that end will point to NULL after this
// function is called.
static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
  if (N == 0) {
    *start = NULL;
    *end = NULL;
    return;
  }

  void *tmp = *head;
  for (int i = 1; i < N; ++i) {
    tmp = SLL_Next(tmp);
  }

  *start = *head;
  *end = tmp;
  *head = SLL_Next(tmp);
  // Unlink range from list.
  SLL_SetNext(tmp, NULL);
}

static inline void SLL_PushRange(void **head, void *start, void *end) {
  if (!start) return;
  SLL_SetNext(end, *head);
  *head = start;
}

static inline size_t SLL_Size(void *head) {
  int count = 0;
  while (head) {
    count++;
    head = SLL_Next(head);
  }
  return count;
}

// Setup helper functions.

static ALWAYS_INLINE size_t SizeClass(size_t size) {
  return class_array[ClassIndex(size)];
}

// Get the byte-size for a specified class
static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
  return class_to_size[cl];
}
static int NumMoveSize(size_t size) {
  if (size == 0) return 0;
  // Use approx 64k transfers between thread and central caches.
  int num = static_cast<int>(64.0 * 1024.0 / size);
  if (num < 2) num = 2;
  // Clamp well below kMaxFreeListLength to avoid ping pong between central
  // and thread caches.
  if (num > static_cast<int>(0.8 * kMaxFreeListLength))
    num = static_cast<int>(0.8 * kMaxFreeListLength);

  // Also, avoid bringing in too many objects into small object free
  // lists.  There are lots of such lists, and if we allow each one to
  // fetch too many at a time, we end up having to scavenge too often
  // (especially when there are lots of threads and each thread gets a
  // small allowance for its thread cache).
  //
  // TODO: Make thread cache free list sizes dynamic so that we do not
  // have to equally divide a fixed resource amongst lots of threads.
  if (num > 32) num = 32;

  return num;
}
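
// Editor's illustration (not in the original source): a few concrete values
// of NumMoveSize. For a 32-byte class, 64KB / 32 = 2048, which the clamps
// reduce to 32; for a 4096-byte class the result is 16; a 32768-byte class
// bottoms out at the minimum batch of 2.
#if 0
static void CheckNumMoveSizeExamples() {
  ASSERT(NumMoveSize(32) == 32);     // capped by the final "num > 32" clamp
  ASSERT(NumMoveSize(4096) == 16);   // 64KB / 4KB, no clamping needed
  ASSERT(NumMoveSize(32768) == 2);   // raised to the minimum of 2
}
#endif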

// Initialize the mapping arrays
static void InitSizeClasses() {
  // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
  if (ClassIndex(0) < 0) {
    MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
    CRASH();
  }
  if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
    MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
    CRASH();
  }

  // Compute the size classes we want to use
  size_t sc = 1;   // Next size class to assign
  unsigned char alignshift = kAlignShift;
  int last_lg = -1;
  for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
    int lg = LgFloor(size);
    if (lg > last_lg) {
      // Increase alignment every so often.
      //
      // Since we double the alignment every time size doubles and
      // size >= 128, this means that space wasted due to alignment is
      // at most 16/128 i.e., 12.5%.  Plus we cap the alignment at 256
      // bytes, so the space wasted as a percentage starts falling for
      // sizes > 2K.
      if ((lg >= 7) && (alignshift < 8)) {
        alignshift++;
      }
      last_lg = lg;
    }

    // Allocate enough pages so leftover is less than 1/8 of total.
    // This bounds wasted space to at most 12.5%.
    size_t psize = kPageSize;
    while ((psize % size) > (psize >> 3)) {
      psize += kPageSize;
    }
    const size_t my_pages = psize >> kPageShift;

    if (sc > 1 && my_pages == class_to_pages[sc-1]) {
      // See if we can merge this into the previous class without
      // increasing the fragmentation of the previous class.
      const size_t my_objects = (my_pages << kPageShift) / size;
      const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
                                  / class_to_size[sc-1];
      if (my_objects == prev_objects) {
        // Adjust last class to include this size
        class_to_size[sc-1] = size;
        continue;
      }
    }

    // Add new class
    class_to_pages[sc] = my_pages;
    class_to_size[sc] = size;
    sc++;
  }
  if (sc != kNumClasses) {
    MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
            sc, int(kNumClasses));
    CRASH();
  }

  // Initialize the mapping arrays
  int next_size = 0;
  for (unsigned char c = 1; c < kNumClasses; c++) {
    const size_t max_size_in_class = class_to_size[c];
    for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
      class_array[ClassIndex(s)] = c;
    }
    next_size = static_cast<int>(max_size_in_class + kAlignment);
  }

  // Double-check sizes just to be safe
  for (size_t size = 0; size <= kMaxSize; size++) {
    const size_t sc = SizeClass(size);
    if (sc == 0) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      CRASH();
    }
    if (sc > 1 && size <= class_to_size[sc-1]) {
      MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
              "\n", sc, size);
      CRASH();
    }
    if (sc >= kNumClasses) {
      MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
      CRASH();
    }
    const size_t s = class_to_size[sc];
    if (size > s) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      CRASH();
    }
    if (s == 0) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
      CRASH();
    }
  }

  // Initialize the num_objects_to_move array.
  for (size_t cl = 1; cl < kNumClasses; ++cl) {
    num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
  }

#ifndef WTF_CHANGES
  if (false) {
    // Dump class sizes and maximum external wastage per size class
    for (size_t cl = 1; cl < kNumClasses; ++cl) {
      const int alloc_size = class_to_pages[cl] << kPageShift;
      const int alloc_objs = alloc_size / class_to_size[cl];
      const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
      const int max_waste = alloc_size - min_used;
      MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
              int(cl),
              int(class_to_size[cl-1] + 1),
              int(class_to_size[cl]),
              int(class_to_pages[cl] << kPageShift),
              max_waste * 100.0 / alloc_size
              );
    }
  }
#endif
}

// -------------------------------------------------------------------------
// Simple allocator for objects of a specified type.  External locking
// is required before accessing one of these objects.
// -------------------------------------------------------------------------

// Metadata allocator -- keeps stats about how many bytes allocated
static uint64_t metadata_system_bytes = 0;
static void* MetaDataAlloc(size_t bytes) {
  void* result = TCMalloc_SystemAlloc(bytes, 0);
  if (result != NULL) {
    metadata_system_bytes += bytes;
  }
  return result;
}

template <class T>
class PageHeapAllocator {
 private:
  // How much to allocate from system at a time
  static const size_t kAllocIncrement = 32 << 10;

  // Aligned size of T
  static const size_t kAlignedSize
    = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);

  // Free area from which to carve new objects
  char* free_area_;
  size_t free_avail_;

  // Linked list of all regions allocated by this allocator
  void* allocated_regions_;

  // Free list of already carved objects
  void* free_list_;

  // Number of allocated but unfreed objects
  int inuse_;

 public:
  void Init() {
    ASSERT(kAlignedSize <= kAllocIncrement);
    inuse_ = 0;
    allocated_regions_ = 0;
    free_area_ = NULL;
    free_avail_ = 0;
    free_list_ = NULL;
  }

  T* New() {
    // Consult free list
    void* result;
    if (free_list_ != NULL) {
      result = free_list_;
      free_list_ = *(reinterpret_cast<void**>(result));
    } else {
      if (free_avail_ < kAlignedSize) {
        // Need more room
        char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
        if (!new_allocation)
          CRASH();

        *reinterpret_cast_ptr<void**>(new_allocation) = allocated_regions_;
        allocated_regions_ = new_allocation;
        free_area_ = new_allocation + kAlignedSize;
        free_avail_ = kAllocIncrement - kAlignedSize;
      }
      result = free_area_;
      free_area_ += kAlignedSize;
      free_avail_ -= kAlignedSize;
    }
    inuse_++;
    return reinterpret_cast<T*>(result);
  }

  void Delete(T* p) {
    *(reinterpret_cast<void**>(p)) = free_list_;
    free_list_ = p;
    inuse_--;
  }

  int inuse() const { return inuse_; }

#if defined(WTF_CHANGES) && OS(DARWIN)
  template <class Recorder>
  void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
  {
      for (void* adminAllocation = allocated_regions_; adminAllocation; adminAllocation = reader.nextEntryInLinkedList(reinterpret_cast<void**>(adminAllocation)))
          recorder.recordRegion(reinterpret_cast<vm_address_t>(adminAllocation), kAllocIncrement);
  }
#endif
};

// -------------------------------------------------------------------------
// Span - a contiguous run of pages
// -------------------------------------------------------------------------

// Type that can hold a page number
typedef uintptr_t PageID;

// Type that can hold the length of a run of pages
typedef uintptr_t Length;

static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;

// Convert byte size into pages.  This won't overflow, but may return
// an unreasonably large value if bytes is huge enough.
static inline Length pages(size_t bytes) {
  return (bytes >> kPageShift) +
      ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
}

// Convert a user size into the number of bytes that will actually be
// allocated
static size_t AllocationSize(size_t bytes) {
  if (bytes > kMaxSize) {
    // Large object: we allocate an integral number of pages
    ASSERT(bytes <= (kMaxValidPages << kPageShift));
    return pages(bytes) << kPageShift;
  } else {
    // Small object: find the size class to which it belongs
    return ByteSizeForClass(SizeClass(bytes));
  }
}
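
// Editor's illustration (not in the original source): with the 4KB pages
// configured above (kPageShift == 12, so kMaxSize == 32768), a large request
// is rounded up to a whole number of pages, e.g. AllocationSize(40000) is
// 10 * 4096 == 40960. Small requests are instead rounded up to the byte size
// of their size class.
#if 0
static void CheckAllocationSizeExample() {
  ASSERT(pages(40000) == 10);              // 9 full pages plus a partial one
  ASSERT(AllocationSize(40000) == 40960);  // 10 pages of 4096 bytes
}
#endif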

// Information kept for a span (a contiguous run of pages).
struct Span {
  PageID        start;          // Starting page number
  Length        length;         // Number of pages in span
  Span*         next;           // Used when in link list
  Span*         prev;           // Used when in link list
  void*         objects;        // Linked list of free objects
  unsigned int  free : 1;       // Is the span free
#ifndef NO_TCMALLOC_SAMPLES
  unsigned int  sample : 1;     // Sampled object?
#endif
  unsigned int  sizeclass : 8;  // Size-class for small objects (or 0)
  unsigned int  refcount : 11;  // Number of non-free objects
  bool decommitted : 1;

#undef SPAN_HISTORY
#ifdef SPAN_HISTORY
  // For debugging, we can keep a log of events per span
  int nexthistory;
  char history[64];
  int value[64];
#endif
};

#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)

#ifdef SPAN_HISTORY
void Event(Span* span, char op, int v = 0) {
  span->history[span->nexthistory] = op;
  span->value[span->nexthistory] = v;
  span->nexthistory++;
  if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
}
#else
#define Event(s,o,v) ((void) 0)
#endif

// Allocator/deallocator for spans
static PageHeapAllocator<Span> span_allocator;
static Span* NewSpan(PageID p, Length len) {
  Span* result = span_allocator.New();
  memset(result, 0, sizeof(*result));
  result->start = p;
  result->length = len;
#ifdef SPAN_HISTORY
  result->nexthistory = 0;
#endif
  return result;
}

static inline void DeleteSpan(Span* span) {
#ifndef NDEBUG
  // In debug mode, trash the contents of deleted Spans
  memset(span, 0x3f, sizeof(*span));
#endif
  span_allocator.Delete(span);
}

// -------------------------------------------------------------------------
// Doubly linked list of spans.
// -------------------------------------------------------------------------

static inline void DLL_Init(Span* list) {
  list->next = list;
  list->prev = list;
}

static inline void DLL_Remove(Span* span) {
  span->prev->next = span->next;
  span->next->prev = span->prev;
  span->prev = NULL;
  span->next = NULL;
}

static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
  return list->next == list;
}

static int DLL_Length(const Span* list) {
  int result = 0;
  for (Span* s = list->next; s != list; s = s->next) {
    result++;
  }
  return result;
}

#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
static void DLL_Print(const char* label, const Span* list) {
  MESSAGE("%-10s %p:", label, list);
  for (const Span* s = list->next; s != list; s = s->next) {
    MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
  }
  MESSAGE("\n");
}
#endif

static inline void DLL_Prepend(Span* list, Span* span) {
  ASSERT(span->next == NULL);
  ASSERT(span->prev == NULL);
  span->next = list->next;
  span->prev = list;
  list->next->prev = span;
  list->next = span;
}

// -------------------------------------------------------------------------
// Stack traces kept for sampled allocations
//   The following state is protected by pageheap_lock_.
// -------------------------------------------------------------------------

// size/depth are made the same size as a pointer so that some generic
// code below can conveniently cast them back and forth to void*.
static const int kMaxStackDepth = 31;
struct StackTrace {
  uintptr_t size;          // Size of object
  uintptr_t depth;         // Number of PC values stored in array below
  void*     stack[kMaxStackDepth];
};
static PageHeapAllocator<StackTrace> stacktrace_allocator;
static Span sampled_objects;

// -------------------------------------------------------------------------
// Map from page-id to per-page data
// -------------------------------------------------------------------------

// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
  typedef PackedCache<BITS, uint64_t> CacheType;
};

#if defined(WTF_CHANGES)
#if CPU(X86_64)
// On all known X86-64 platforms, the upper 16 bits are always unused and therefore
// can be excluded from the PageMap key.
// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details

static const size_t kBitsUnusedOn64Bit = 16;
#else
static const size_t kBitsUnusedOn64Bit = 0;
#endif

// A three-level map for 64-bit machines
template <> class MapSelector<64> {
 public:
  typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
  typedef PackedCache<64, uint64_t> CacheType;
};
#endif

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32 - kPageShift> Type;
  typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
};

// -------------------------------------------------------------------------
// Page-level allocator
//  * Eager coalescing
//
// Heap for page-level allocation.  We allow allocating and freeing
// contiguous runs of pages (called a "span").
// -------------------------------------------------------------------------

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
// The page heap maintains a free list for spans that are no longer in use by
// the central cache or any thread caches. We use a background thread to
// periodically scan the free list and release a percentage of it back to the OS.

// If free_committed_pages_ exceeds kMinimumFreeCommittedPageCount, the
// background thread:
//     - wakes up
//     - pauses for kScavengeDelayInSeconds
//     - returns to the OS a percentage of the memory that remained unused during
//       that pause (kScavengePercentage * min_free_committed_pages_since_last_scavenge_)
// The goal of this strategy is to reduce memory pressure in a timely fashion
// while avoiding thrashing the OS allocator.

// Time delay before the page heap scavenger will consider returning pages to
// the OS.
static const int kScavengeDelayInSeconds = 2;

// Approximate percentage of free committed pages to return to the OS in one
// scavenge.
static const float kScavengePercentage = .5f;

// number of span lists to keep spans in when memory is returned.
static const int kMinSpanListsWithSpans = 32;

// Number of free committed pages that we want to keep around.  The minimum number of pages used when there
// is 1 span in each of the first kMinSpanListsWithSpans spanlists.  Currently 528 pages.
static const size_t kMinimumFreeCommittedPageCount = kMinSpanListsWithSpans * ((1.0f+kMinSpanListsWithSpans) / 2.0f);

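// Editor's note (not in the original source): the constant above is the
// arithmetic series 1 + 2 + ... + kMinSpanListsWithSpans, written as
// n * (1 + n) / 2 -- one span of each length in the first 32 span lists,
// i.e. 32 * 33 / 2 = 528 pages.
#if 0
COMPILE_ASSERT(kMinimumFreeCommittedPageCount == 528, minimum_free_committed_page_count_is_528);
#endif
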
#endif

static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;

class TCMalloc_PageHeap {
 public:
  void init();

  // Allocate a run of "n" pages.  Returns zero if out of memory.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]".
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: !span->free
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);

  // Return the descriptor for the specified page.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

#ifdef WTF_CHANGES
  inline Span* GetDescriptorEnsureSafe(PageID p)
  {
      pagemap_.Ensure(p, 1);
      return GetDescriptor(p);
  }

  size_t ReturnedBytes() const;
#endif

  // Dump state to stderr
#ifndef WTF_CHANGES
  void Dump(TCMalloc_Printer* out);
#endif

  // Return number of bytes allocated from system
  inline uint64_t SystemBytes() const { return system_bytes_; }

  // Return number of free bytes in heap
  uint64_t FreeBytes() const {
    return (static_cast<uint64_t>(free_pages_) << kPageShift);
  }

  bool Check();
  bool CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted);

  // Release all pages on the free list for reuse by the OS:
  void ReleaseFreePages();

  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }

 private:
  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
  typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
  PageMap pagemap_;
  mutable PageMapCache pagemap_cache_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span        normal;
    Span        returned;
  };

  // List of free spans of length >= kMaxPages
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans
  SpanList free_[kMaxPages];

  // Number of pages kept in free lists
  uintptr_t free_pages_;

  // Bytes allocated from system
  uint64_t system_bytes_;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  // Number of pages kept in free lists that are still committed.
  Length free_committed_pages_;

  // Minimum number of free committed pages since last scavenge. (Can be 0 if
  // we've committed new pages since the last scavenge.)
  Length min_free_committed_pages_since_last_scavenge_;
#endif

  bool GrowHeap(Length n);

  // REQUIRES   span->length >= n
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists.  Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.
  //
  // "released" is true iff "span" was found on a "returned" list.
  void Carve(Span* span, Length n, bool released);

  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }

  // Allocate a large span of length == n.  If successful, returns a
  // span of exactly the specified length.  Else, returns NULL.
  Span* AllocLarge(Length n);

#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);
#endif

  // Number of pages to deallocate before doing more scavenging
  int64_t scavenge_counter_;

  // Index of last free list we scavenged
  size_t scavenge_index_;

#if defined(WTF_CHANGES) && OS(DARWIN)
  friend class FastMallocZone;
#endif

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  void initializeScavenger();
  ALWAYS_INLINE void signalScavenger();
  void scavenge();
  ALWAYS_INLINE bool shouldScavenge() const;

#if HAVE(DISPATCH_H) || OS(WINDOWS)
  void periodicScavenge();
  ALWAYS_INLINE bool isScavengerSuspended();
  ALWAYS_INLINE void scheduleScavenger();
  ALWAYS_INLINE void rescheduleScavenger();
  ALWAYS_INLINE void suspendScavenger();
#endif

#if HAVE(DISPATCH_H)
  dispatch_queue_t m_scavengeQueue;
  dispatch_source_t m_scavengeTimer;
  bool m_scavengingSuspended;
#elif OS(WINDOWS)
  static void CALLBACK scavengerTimerFired(void*, BOOLEAN);
  HANDLE m_scavengeQueueTimer;
#else
  static NO_RETURN_WITH_VALUE void* runScavengerThread(void*);
  NO_RETURN void scavengerThread();

  // Keeps track of whether the background thread is actively scavenging memory every kScavengeDelayInSeconds,
  // or whether it's blocked waiting for more pages to be deleted.
  bool m_scavengeThreadActive;

  pthread_mutex_t m_scavengeMutex;
  pthread_cond_t m_scavengeCondition;
#endif

#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
};

void TCMalloc_PageHeap::init()
{
  pagemap_.init(MetaDataAlloc);
  pagemap_cache_ = PageMapCache(0);
  free_pages_ = 0;
  system_bytes_ = 0;

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  free_committed_pages_ = 0;
  min_free_committed_pages_since_last_scavenge_ = 0;
#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY

  scavenge_counter_ = 0;
  // Start scavenging at kMaxPages list
  scavenge_index_ = kMaxPages-1;
  COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
  DLL_Init(&large_.normal);
  DLL_Init(&large_.returned);
  for (size_t i = 0; i < kMaxPages; i++) {
    DLL_Init(&free_[i].normal);
    DLL_Init(&free_[i].returned);
  }

#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  initializeScavenger();
#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
}

1513#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1514
14957cd0 1515#if HAVE(DISPATCH_H)
f9bf01c6
A
1516
1517void TCMalloc_PageHeap::initializeScavenger()
1518{
14957cd0
A
1519 m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocSavenger", NULL);
1520 m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
1521 dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, kScavengeDelayInSeconds * NSEC_PER_SEC);
1522 dispatch_source_set_timer(m_scavengeTimer, startTime, kScavengeDelayInSeconds * NSEC_PER_SEC, 1000 * NSEC_PER_USEC);
1523 dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
1524 m_scavengingSuspended = true;
b37bf2e1
A
1525}
1526
14957cd0 1527ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
ba379fdc 1528{
14957cd0
A
1529 ASSERT(pageheap_lock.IsHeld());
1530 return m_scavengingSuspended;
ba379fdc
A
1531}
1532
14957cd0
A
1533ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
1534{
1535 ASSERT(pageheap_lock.IsHeld());
1536 m_scavengingSuspended = false;
1537 dispatch_resume(m_scavengeTimer);
1538}
1539
1540ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
1541{
1542 // Nothing to do here for libdispatch.
1543}
1544
1545ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
1546{
1547 ASSERT(pageheap_lock.IsHeld());
1548 m_scavengingSuspended = true;
1549 dispatch_suspend(m_scavengeTimer);
1550}
1551
1552#elif OS(WINDOWS)
1553
1554void TCMalloc_PageHeap::scavengerTimerFired(void* context, BOOLEAN)
1555{
1556 static_cast<TCMalloc_PageHeap*>(context)->periodicScavenge();
1557}
1558
1559void TCMalloc_PageHeap::initializeScavenger()
1560{
1561 m_scavengeQueueTimer = 0;
1562}
1563
1564ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
1565{
1566 ASSERT(IsHeld(pageheap_lock));
1567 return !m_scavengeQueueTimer;
1568}
1569
1570ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
1571{
1572 // We need to use WT_EXECUTEONLYONCE here and reschedule the timer, because
1573 // Windows will fire the timer event even when the function is already running.
1574 ASSERT(IsHeld(pageheap_lock));
1575 CreateTimerQueueTimer(&m_scavengeQueueTimer, 0, scavengerTimerFired, this, kScavengeDelayInSeconds * 1000, 0, WT_EXECUTEONLYONCE);
1576}
1577
1578ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
1579{
1580 // We must delete the timer and create it again, because it is not possible to retrigger a timer on Windows.
1581 suspendScavenger();
1582 scheduleScavenger();
1583}
1584
1585ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
f9bf01c6 1586{
14957cd0
A
1587 ASSERT(IsHeld(pageheap_lock));
1588 HANDLE scavengeQueueTimer = m_scavengeQueueTimer;
1589 m_scavengeQueueTimer = 0;
1590 DeleteTimerQueueTimer(0, scavengeQueueTimer, 0);
1591}
1592
1593#else
1594
1595void TCMalloc_PageHeap::initializeScavenger()
1596{
1597 // Create a non-recursive mutex.
1598#if !defined(PTHREAD_MUTEX_NORMAL) || PTHREAD_MUTEX_NORMAL == PTHREAD_MUTEX_DEFAULT
1599 pthread_mutex_init(&m_scavengeMutex, 0);
1600#else
1601 pthread_mutexattr_t attr;
1602 pthread_mutexattr_init(&attr);
1603 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
1604
1605 pthread_mutex_init(&m_scavengeMutex, &attr);
1606
1607 pthread_mutexattr_destroy(&attr);
1608#endif
1609
1610 pthread_cond_init(&m_scavengeCondition, 0);
1611 m_scavengeThreadActive = true;
1612 pthread_t thread;
1613 pthread_create(&thread, 0, runScavengerThread, this);
1614}
1615
1616void* TCMalloc_PageHeap::runScavengerThread(void* context)
1617{
1618 static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
1619#if (COMPILER(MSVC) || COMPILER(SUNCC))
1620 // Without this, Visual Studio and Sun Studio will complain that this method does not return a value.
1621 return 0;
1622#endif
1623}
1624
1625ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
1626{
1627 // m_scavengeMutex should be held before accessing m_scavengeThreadActive.
1628 ASSERT(pthread_mutex_trylock(&m_scavengeMutex));
1629 if (!m_scavengeThreadActive && shouldScavenge())
1630 pthread_cond_signal(&m_scavengeCondition);
1631}
1632
1633#endif
1634
1635void TCMalloc_PageHeap::scavenge()
1636{
1637 size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kScavengePercentage;
1638 size_t targetPageCount = std::max<size_t>(kMinimumFreeCommittedPageCount, free_committed_pages_ - pagesToRelease);
1639
1640 Length lastFreeCommittedPages = free_committed_pages_;
1641 while (free_committed_pages_ > targetPageCount) {
1642 ASSERT(Check());
1643 for (int i = kMaxPages; i > 0 && free_committed_pages_ >= targetPageCount; i--) {
1644 SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
1645 // If the span size is bigger than kMinSpanListsWithSpans pages, return all the spans in the list;
1646 // otherwise return only half of the spans, so that spans of size 1 are not the only ones left.
1647 size_t length = DLL_Length(&slist->normal);
1648 size_t numSpansToReturn = (i > kMinSpanListsWithSpans) ? length : length / 2;
1649 for (int j = 0; static_cast<size_t>(j) < numSpansToReturn && !DLL_IsEmpty(&slist->normal) && free_committed_pages_ > targetPageCount; j++) {
1650 Span* s = slist->normal.prev;
1651 DLL_Remove(s);
1652 ASSERT(!s->decommitted);
1653 if (!s->decommitted) {
1654 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
1655 static_cast<size_t>(s->length << kPageShift));
1656 ASSERT(free_committed_pages_ >= s->length);
1657 free_committed_pages_ -= s->length;
1658 s->decommitted = true;
1659 }
1660 DLL_Prepend(&slist->returned, s);
1661 }
1662 }
1663
1664 if (lastFreeCommittedPages == free_committed_pages_)
1665 break;
1666 lastFreeCommittedPages = free_committed_pages_;
1667 }
1668
1669 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1670}
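// Illustrative note (not from the original source): the policy above releases a
// fraction of the low-water mark of committed free pages seen since the last run.
// With hypothetical numbers, assuming kScavengePercentage were 0.5 and
// kMinimumFreeCommittedPageCount were 512:
//
//     min_free_committed_pages_since_last_scavenge_ = 2048      // low-water mark
//     pagesToRelease  = 2048 * 0.5  = 1024
//     targetPageCount = max(512, free_committed_pages_ - 1024)
//
// The outer loop then decommits spans, largest lists first, until the committed
// count reaches the target or a full pass makes no progress (the
// lastFreeCommittedPages check).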
1671
1672ALWAYS_INLINE bool TCMalloc_PageHeap::shouldScavenge() const
1673{
1674 return free_committed_pages_ > kMinimumFreeCommittedPageCount;
1675}
1676
1677#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1678
1679inline Span* TCMalloc_PageHeap::New(Length n) {
1680 ASSERT(Check());
1681 ASSERT(n > 0);
1682
1683 // Find first size >= n that has a non-empty list
1684 for (Length s = n; s < kMaxPages; s++) {
1685 Span* ll = NULL;
1686 bool released = false;
1687 if (!DLL_IsEmpty(&free_[s].normal)) {
1688 // Found normal span
1689 ll = &free_[s].normal;
1690 } else if (!DLL_IsEmpty(&free_[s].returned)) {
1691 // Found returned span; reallocate it
1692 ll = &free_[s].returned;
1693 released = true;
1694 } else {
1695 // Keep looking in larger classes
1696 continue;
1697 }
1698
1699 Span* result = ll->next;
1700 Carve(result, n, released);
1701#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1702 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
1703 // free committed pages count.
1704 ASSERT(free_committed_pages_ >= n);
1705 free_committed_pages_ -= n;
1706 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
1707 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1708#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1709 ASSERT(Check());
1710 free_pages_ -= n;
1711 return result;
1712 }
1713
1714 Span* result = AllocLarge(n);
1715 if (result != NULL) {
1716 ASSERT_SPAN_COMMITTED(result);
1717 return result;
1718 }
1719
1720 // Grow the heap and try again
1721 if (!GrowHeap(n)) {
1722 ASSERT(Check());
1723 return NULL;
1724 }
1725
1726 return AllocLarge(n);
1727}
1728
1729Span* TCMalloc_PageHeap::AllocLarge(Length n) {
1730 // find the best span (closest to n in size).
1731 // The following loops implement address-ordered best-fit.
1732 bool from_released = false;
1733 Span *best = NULL;
1734
1735 // Search through normal list
1736 for (Span* span = large_.normal.next;
1737 span != &large_.normal;
1738 span = span->next) {
1739 if (span->length >= n) {
1740 if ((best == NULL)
1741 || (span->length < best->length)
1742 || ((span->length == best->length) && (span->start < best->start))) {
1743 best = span;
1744 from_released = false;
1745 }
1746 }
1747 }
1748
1749 // Search through released list in case it has a better fit
1750 for (Span* span = large_.returned.next;
1751 span != &large_.returned;
1752 span = span->next) {
1753 if (span->length >= n) {
1754 if ((best == NULL)
1755 || (span->length < best->length)
1756 || ((span->length == best->length) && (span->start < best->start))) {
1757 best = span;
1758 from_released = true;
1759 }
1760 }
1761 }
1762
1763 if (best != NULL) {
1764 Carve(best, n, from_released);
1765#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1766 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
1767 // free committed pages count.
1768 ASSERT(free_committed_pages_ >= n);
1769 free_committed_pages_ -= n;
1770 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
1771 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1772#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1773 ASSERT(Check());
1774 free_pages_ -= n;
1775 return best;
1776 }
1777 return NULL;
1778}
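// The two loops above implement the same best-fit rule; restated compactly
// (illustrative only), a candidate span replaces the current best when:
//
//     bool better = !best
//         || span->length < best->length
//         || (span->length == best->length && span->start < best->start);
//
// Preferring the lower start address on ties tends to pack large allocations
// toward one end of the address space, which helps later coalescing.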
1779
1780Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
1781 ASSERT(0 < n);
1782 ASSERT(n < span->length);
1783 ASSERT(!span->free);
1784 ASSERT(span->sizeclass == 0);
1785 Event(span, 'T', n);
1786
1787 const Length extra = span->length - n;
1788 Span* leftover = NewSpan(span->start + n, extra);
1789 Event(leftover, 'U', extra);
1790 RecordSpan(leftover);
1791 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
1792 span->length = n;
1793
1794 return leftover;
1795}
1796
1797inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
1798 ASSERT(n > 0);
1799 DLL_Remove(span);
1800 span->free = 0;
1801 Event(span, 'A', n);
1802
1803 if (released) {
1804 // If the span chosen to carve from is decommitted, commit the entire span at once to avoid committing spans 1 page at a time.
1805 ASSERT(span->decommitted);
1806 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), static_cast<size_t>(span->length << kPageShift));
1807 span->decommitted = false;
1808#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1809 free_committed_pages_ += span->length;
1810#endif
1811 }
1812
1813 const int extra = static_cast<int>(span->length - n);
1814 ASSERT(extra >= 0);
1815 if (extra > 0) {
1816 Span* leftover = NewSpan(span->start + n, extra);
1817 leftover->free = 1;
1818 leftover->decommitted = false;
1819 Event(leftover, 'S', extra);
1820 RecordSpan(leftover);
1821
1822 // Place leftover span on appropriate free list
1823 SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
1824 Span* dst = &listpair->normal;
1825 DLL_Prepend(dst, leftover);
1826
1827 span->length = n;
1828 pagemap_.set(span->start + n - 1, span);
1829 }
1830}
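// Worked example of Carve() on a released span (illustrative numbers only):
// carving 3 pages out of an 8-page decommitted span S.
//
//     before:  S = [start, start+7], decommitted, on a ".returned" list
//     commit:  TCMalloc_SystemCommit(start << kPageShift, 8 << kPageShift)
//     after:   [start, start+2]   handed to the caller (span->length = 3)
//              [start+3, start+7] new leftover span, committed, prepended to
//                                 free_[5].normal
//
// Because the whole span is committed up front, the leftover always lands on a
// "normal" (committed) list, never back on "returned".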
1831
1832static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
1833{
1834 if (destination->decommitted && !other->decommitted) {
1835 TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
1836 static_cast<size_t>(other->length << kPageShift));
1837 } else if (other->decommitted && !destination->decommitted) {
1838 TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift),
1839 static_cast<size_t>(destination->length << kPageShift));
1840 destination->decommitted = true;
1841 }
1842}
1843
1844inline void TCMalloc_PageHeap::Delete(Span* span) {
1845 ASSERT(Check());
1846 ASSERT(!span->free);
1847 ASSERT(span->length > 0);
1848 ASSERT(GetDescriptor(span->start) == span);
1849 ASSERT(GetDescriptor(span->start + span->length - 1) == span);
1850 span->sizeclass = 0;
1851#ifndef NO_TCMALLOC_SAMPLES
1852 span->sample = 0;
1853#endif
1854
1855 // Coalesce -- we guarantee that "p" != 0, so no bounds checking
1856 // necessary. We do not bother resetting the stale pagemap
1857 // entries for the pieces we are merging together because we only
1858 // care about the pagemap entries for the boundaries.
1859#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1860 // Track the total size of the neighboring free spans that are committed.
1861 Length neighboringCommittedSpansLength = 0;
1862#endif
1863 const PageID p = span->start;
1864 const Length n = span->length;
1865 Span* prev = GetDescriptor(p-1);
1866 if (prev != NULL && prev->free) {
1867 // Merge preceding span into this span
1868 ASSERT(prev->start + prev->length == p);
1869 const Length len = prev->length;
1870#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1871 if (!prev->decommitted)
1872 neighboringCommittedSpansLength += len;
1873#endif
1874 mergeDecommittedStates(span, prev);
1875 DLL_Remove(prev);
1876 DeleteSpan(prev);
1877 span->start -= len;
1878 span->length += len;
1879 pagemap_.set(span->start, span);
1880 Event(span, 'L', len);
1881 }
1882 Span* next = GetDescriptor(p+n);
1883 if (next != NULL && next->free) {
1884 // Merge next span into this span
1885 ASSERT(next->start == p+n);
1886 const Length len = next->length;
1887#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1888 if (!next->decommitted)
1889 neighboringCommittedSpansLength += len;
1890#endif
1891 mergeDecommittedStates(span, next);
1892 DLL_Remove(next);
1893 DeleteSpan(next);
1894 span->length += len;
1895 pagemap_.set(span->start + span->length - 1, span);
1896 Event(span, 'R', len);
1897 }
1898
1899 Event(span, 'D', span->length);
1900 span->free = 1;
1901 if (span->decommitted) {
1902 if (span->length < kMaxPages)
1903 DLL_Prepend(&free_[span->length].returned, span);
1904 else
1905 DLL_Prepend(&large_.returned, span);
1906 } else {
1907 if (span->length < kMaxPages)
1908 DLL_Prepend(&free_[span->length].normal, span);
1909 else
1910 DLL_Prepend(&large_.normal, span);
1911 }
1912 free_pages_ += n;
1913
1914#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1915 if (span->decommitted) {
1916 // If the merged span is decommitted, that means we decommitted any neighboring spans that were
1917 // committed. Update the free committed pages count.
1918 free_committed_pages_ -= neighboringCommittedSpansLength;
1919 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
1920 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1921 } else {
1922 // If the merged span remains committed, add the deleted span's size to the free committed pages count.
1923 free_committed_pages_ += n;
1924 }
1925
1926 // Make sure the scavenge thread becomes active if we have enough freed pages to release some back to the system.
1927 signalScavenger();
1928#else
1929 IncrementalScavenge(n);
1930#endif
1931
1932 ASSERT(Check());
1933}
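// Coalescing in Delete() only consults the pagemap at span boundaries, so stale
// interior entries are harmless. A minimal sketch of the two boundary probes,
// using the same names as above:
//
//     const PageID p = span->start;
//     const Length n = span->length;
//     Span* prev = GetDescriptor(p - 1);   // span ending just before this one
//     Span* next = GetDescriptor(p + n);   // span starting just after it
//
// GrowHeap()'s pagemap_.Ensure(p - 1, ask + 2) is what guarantees these two
// out-of-range probes never need bounds checks.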
1934
1935#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1936void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
1937 // Fast path; not yet time to release memory
1938 scavenge_counter_ -= n;
1939 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
1940
1941 static const size_t kDefaultReleaseDelay = 64;
1942
1943 // Find index of free list to scavenge
1944 size_t index = scavenge_index_ + 1;
1945 for (size_t i = 0; i < kMaxPages+1; i++) {
1946 if (index > kMaxPages) index = 0;
1947 SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
1948 if (!DLL_IsEmpty(&slist->normal)) {
1949 // Release the last span on the normal portion of this list
1950 Span* s = slist->normal.prev;
1951 DLL_Remove(s);
1952 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
1953 static_cast<size_t>(s->length << kPageShift));
1954 s->decommitted = true;
1955 DLL_Prepend(&slist->returned, s);
1956
1957 scavenge_counter_ = std::max<size_t>(16UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));
1958
1959 if (index == kMaxPages && !DLL_IsEmpty(&slist->normal))
1960 scavenge_index_ = index - 1;
1961 else
1962 scavenge_index_ = index;
1963 return;
1964 }
1965 index++;
1966 }
1967
1968 // Nothing to scavenge, delay for a while
1969 scavenge_counter_ = kDefaultReleaseDelay;
1970}
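// Rough behavior of IncrementalScavenge() above (illustrative summary): each
// Delete() decrements scavenge_counter_ by the number of pages freed, and only
// when the counter goes negative is a single span released via
// TCMalloc_SystemRelease. With kDefaultReleaseDelay = 64 that is at most one
// release per few dozen freed pages, cheap enough to run inline on the free path
// when no background scavenger is available.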
1971#endif
1972
1973void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
1974 // Associate span object with all interior pages as well
1975 ASSERT(!span->free);
1976 ASSERT(GetDescriptor(span->start) == span);
1977 ASSERT(GetDescriptor(span->start+span->length-1) == span);
1978 Event(span, 'C', sc);
1979 span->sizeclass = static_cast<unsigned int>(sc);
1980 for (Length i = 1; i < span->length-1; i++) {
1981 pagemap_.set(span->start+i, span);
1982 }
1983}
1984
1985#ifdef WTF_CHANGES
1986size_t TCMalloc_PageHeap::ReturnedBytes() const {
1987 size_t result = 0;
1988 for (unsigned s = 0; s < kMaxPages; s++) {
1989 const int r_length = DLL_Length(&free_[s].returned);
1990 unsigned r_pages = s * r_length;
1991 result += r_pages << kPageShift;
1992 }
1993
1994 for (Span* s = large_.returned.next; s != &large_.returned; s = s->next)
1995 result += s->length << kPageShift;
1996 return result;
1997}
1998#endif
1999
2000#ifndef WTF_CHANGES
2001static double PagesToMB(uint64_t pages) {
2002 return (pages << kPageShift) / 1048576.0;
2003}
2004
2005void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
2006 int nonempty_sizes = 0;
2007 for (int s = 0; s < kMaxPages; s++) {
2008 if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
2009 nonempty_sizes++;
2010 }
2011 }
2012 out->printf("------------------------------------------------\n");
2013 out->printf("PageHeap: %d sizes; %6.1f MB free\n",
2014 nonempty_sizes, PagesToMB(free_pages_));
2015 out->printf("------------------------------------------------\n");
2016 uint64_t total_normal = 0;
2017 uint64_t total_returned = 0;
2018 for (int s = 0; s < kMaxPages; s++) {
2019 const int n_length = DLL_Length(&free_[s].normal);
2020 const int r_length = DLL_Length(&free_[s].returned);
2021 if (n_length + r_length > 0) {
2022 uint64_t n_pages = s * n_length;
2023 uint64_t r_pages = s * r_length;
2024 total_normal += n_pages;
2025 total_returned += r_pages;
2026 out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
2027 "; unmapped: %6.1f MB; %6.1f MB cum\n",
2028 s,
2029 (n_length + r_length),
2030 PagesToMB(n_pages + r_pages),
2031 PagesToMB(total_normal + total_returned),
2032 PagesToMB(r_pages),
2033 PagesToMB(total_returned));
2034 }
2035 }
2036
2037 uint64_t n_pages = 0;
2038 uint64_t r_pages = 0;
2039 int n_spans = 0;
2040 int r_spans = 0;
2041 out->printf("Normal large spans:\n");
2042 for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
2043 out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
2044 s->length, PagesToMB(s->length));
2045 n_pages += s->length;
2046 n_spans++;
2047 }
2048 out->printf("Unmapped large spans:\n");
2049 for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
2050 out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
2051 s->length, PagesToMB(s->length));
2052 r_pages += s->length;
2053 r_spans++;
2054 }
2055 total_normal += n_pages;
2056 total_returned += r_pages;
2057 out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
2058 "; unmapped: %6.1f MB; %6.1f MB cum\n",
2059 (n_spans + r_spans),
2060 PagesToMB(n_pages + r_pages),
2061 PagesToMB(total_normal + total_returned),
2062 PagesToMB(r_pages),
2063 PagesToMB(total_returned));
2064}
2065#endif
2066
2067bool TCMalloc_PageHeap::GrowHeap(Length n) {
2068 ASSERT(kMaxPages >= kMinSystemAlloc);
2069 if (n > kMaxValidPages) return false;
2070 Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
2071 size_t actual_size;
2072 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
2073 if (ptr == NULL) {
2074 if (n < ask) {
2075 // Try growing just "n" pages
2076 ask = n;
2077 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
2078 }
2079 if (ptr == NULL) return false;
2080 }
2081 ask = actual_size >> kPageShift;
2082
2083 uint64_t old_system_bytes = system_bytes_;
2084 system_bytes_ += (ask << kPageShift);
2085 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
2086 ASSERT(p > 0);
2087
2088 // If we already have a lot of pages allocated, just preallocate a bunch of
2089 // memory for the page map. This prevents fragmentation by pagemap metadata
2090 // when a program keeps allocating and freeing large blocks.
2091
2092 if (old_system_bytes < kPageMapBigAllocationThreshold
2093 && system_bytes_ >= kPageMapBigAllocationThreshold) {
2094 pagemap_.PreallocateMoreMemory();
2095 }
2096
2097 // Make sure pagemap_ has entries for all of the new pages.
2098 // Plus ensure one before and one after so coalescing code
2099 // does not need bounds-checking.
2100 if (pagemap_.Ensure(p-1, ask+2)) {
2101 // Pretend the new area is allocated and then Delete() it to
2102 // cause any necessary coalescing to occur.
2103 //
2104 // We do not adjust free_pages_ here since Delete() will do it for us.
2105 Span* span = NewSpan(p, ask);
2106 RecordSpan(span);
2107 Delete(span);
2108 ASSERT(Check());
2109 return true;
2110 } else {
2111 // We could not allocate memory within "pagemap_"
2112 // TODO: Once we can return memory to the system, return the new span
2113 return false;
2114 }
2115}
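// Illustrative sizing example for GrowHeap(), assuming kPageShift = 12 (4 KB
// pages) and kMinSystemAlloc = 256 pages: a request for n = 3 pages asks the
// system for 256 << 12 = 1 MB, and ask is then recomputed from actual_size so the
// whole grant is accounted for. Registering the new region as a span and
// immediately Delete()-ing it reuses the normal coalescing path to merge it with
// any adjacent free memory.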
2116
2117bool TCMalloc_PageHeap::Check() {
2118 ASSERT(free_[0].normal.next == &free_[0].normal);
2119 ASSERT(free_[0].returned.next == &free_[0].returned);
2120 CheckList(&large_.normal, kMaxPages, 1000000000, false);
2121 CheckList(&large_.returned, kMaxPages, 1000000000, true);
2122 for (Length s = 1; s < kMaxPages; s++) {
2123 CheckList(&free_[s].normal, s, s, false);
2124 CheckList(&free_[s].returned, s, s, true);
2125 }
2126 return true;
2127}
2128
2129#if ASSERT_DISABLED
2130bool TCMalloc_PageHeap::CheckList(Span*, Length, Length, bool) {
2131 return true;
2132}
2133#else
2134bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted) {
2135 for (Span* s = list->next; s != list; s = s->next) {
2136 CHECK_CONDITION(s->free);
2137 CHECK_CONDITION(s->length >= min_pages);
2138 CHECK_CONDITION(s->length <= max_pages);
2139 CHECK_CONDITION(GetDescriptor(s->start) == s);
2140 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
2141 CHECK_CONDITION(s->decommitted == decommitted);
2142 }
2143 return true;
2144}
2145#endif
2146
2147static void ReleaseFreeList(Span* list, Span* returned) {
2148 // Walk backwards through list so that when we push these
2149 // spans on the "returned" list, we preserve the order.
2150 while (!DLL_IsEmpty(list)) {
2151 Span* s = list->prev;
2152 DLL_Remove(s);
2153 s->decommitted = true;
2154 DLL_Prepend(returned, s);
2155 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
2156 static_cast<size_t>(s->length << kPageShift));
2157 }
2158}
2159
2160void TCMalloc_PageHeap::ReleaseFreePages() {
2161 for (Length s = 0; s < kMaxPages; s++) {
2162 ReleaseFreeList(&free_[s].normal, &free_[s].returned);
2163 }
2164 ReleaseFreeList(&large_.normal, &large_.returned);
2165 ASSERT(Check());
2166}
2167
2168//-------------------------------------------------------------------
2169// Free list
2170//-------------------------------------------------------------------
2171
2172class TCMalloc_ThreadCache_FreeList {
2173 private:
2174 void* list_; // Linked list of nodes
2175 uint16_t length_; // Current length
2176 uint16_t lowater_; // Low water mark for list length
2177
2178 public:
2179 void Init() {
2180 list_ = NULL;
2181 length_ = 0;
2182 lowater_ = 0;
2183 }
2184
2185 // Return current length of list
2186 int length() const {
2187 return length_;
2188 }
2189
2190 // Is list empty?
2191 bool empty() const {
2192 return list_ == NULL;
2193 }
2194
2195 // Low-water mark management
2196 int lowwatermark() const { return lowater_; }
2197 void clear_lowwatermark() { lowater_ = length_; }
2198
2199 ALWAYS_INLINE void Push(void* ptr) {
2200 SLL_Push(&list_, ptr);
2201 length_++;
2202 }
2203
2204 void PushRange(int N, void *start, void *end) {
2205 SLL_PushRange(&list_, start, end);
2206 length_ = length_ + static_cast<uint16_t>(N);
2207 }
2208
2209 void PopRange(int N, void **start, void **end) {
2210 SLL_PopRange(&list_, N, start, end);
2211 ASSERT(length_ >= N);
2212 length_ = length_ - static_cast<uint16_t>(N);
2213 if (length_ < lowater_) lowater_ = length_;
2214 }
2215
2216 ALWAYS_INLINE void* Pop() {
2217 ASSERT(list_ != NULL);
2218 length_--;
2219 if (length_ < lowater_) lowater_ = length_;
2220 return SLL_Pop(&list_);
2221 }
2222
2223#ifdef WTF_CHANGES
2224 template <class Finder, class Reader>
2225 void enumerateFreeObjects(Finder& finder, const Reader& reader)
2226 {
2227 for (void* nextObject = list_; nextObject; nextObject = reader.nextEntryInLinkedList(reinterpret_cast<void**>(nextObject)))
2228 finder.visit(nextObject);
2229 }
2230#endif
2231};
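// Usage sketch (illustrative only): the thread-cache free list is a singly linked
// list threaded through the free objects themselves, so the hot operations are a
// pointer write and a pointer read.
//
//     TCMalloc_ThreadCache_FreeList list;
//     list.Init();
//     list.Push(object);        // SLL_Push: store the old head into *object
//     void* p = list.Pop();     // SLL_Pop: return the head, follow its first word
//
// lowater_ records the smallest length seen since the last Scavenge(); Scavenge()
// uses it to return roughly half of the provably unused tail of each list to the
// central cache.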
2232
2233//-------------------------------------------------------------------
2234// Data kept per thread
2235//-------------------------------------------------------------------
2236
2237class TCMalloc_ThreadCache {
2238 private:
2239 typedef TCMalloc_ThreadCache_FreeList FreeList;
2240#if OS(WINDOWS)
2241 typedef DWORD ThreadIdentifier;
2242#else
2243 typedef pthread_t ThreadIdentifier;
2244#endif
2245
2246 size_t size_; // Combined size of data
2247 ThreadIdentifier tid_; // Which thread owns it
2248 bool in_setspecific_; // Called pthread_setspecific?
2249 FreeList list_[kNumClasses]; // Array indexed by size-class
2250
2251 // We sample allocations, biased by the size of the allocation
2252 uint32_t rnd_; // Cheap random number generator
2253 size_t bytes_until_sample_; // Bytes until we sample next
2254
2255 // Allocate a new heap. REQUIRES: pageheap_lock is held.
2256 static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);
2257
2258 // Use only as pthread thread-specific destructor function.
2259 static void DestroyThreadCache(void* ptr);
2260 public:
2261 // All ThreadCache objects are kept in a linked list (for stats collection)
2262 TCMalloc_ThreadCache* next_;
2263 TCMalloc_ThreadCache* prev_;
2264
2265 void Init(ThreadIdentifier tid);
2266 void Cleanup();
2267
2268 // Accessors (mostly just for printing stats)
2269 int freelist_length(size_t cl) const { return list_[cl].length(); }
2270
2271 // Total byte size in cache
2272 size_t Size() const { return size_; }
2273
2274 ALWAYS_INLINE void* Allocate(size_t size);
2275 void Deallocate(void* ptr, size_t size_class);
2276
2277 ALWAYS_INLINE void FetchFromCentralCache(size_t cl, size_t allocationSize);
2278 void ReleaseToCentralCache(size_t cl, int N);
2279 void Scavenge();
2280 void Print() const;
2281
2282 // Record allocation of "k" bytes. Return true iff allocation
2283 // should be sampled
2284 bool SampleAllocation(size_t k);
2285
2286 // Pick next sampling point
2287 void PickNextSample(size_t k);
2288
2289 static void InitModule();
2290 static void InitTSD();
2291 static TCMalloc_ThreadCache* GetThreadHeap();
2292 static TCMalloc_ThreadCache* GetCache();
2293 static TCMalloc_ThreadCache* GetCacheIfPresent();
2294 static TCMalloc_ThreadCache* CreateCacheIfNecessary();
2295 static void DeleteCache(TCMalloc_ThreadCache* heap);
2296 static void BecomeIdle();
2297 static void RecomputeThreadCacheSize();
2298
2299#ifdef WTF_CHANGES
2300 template <class Finder, class Reader>
2301 void enumerateFreeObjects(Finder& finder, const Reader& reader)
2302 {
2303 for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
2304 list_[sizeClass].enumerateFreeObjects(finder, reader);
2305 }
2306#endif
2307};
2308
2309//-------------------------------------------------------------------
2310// Data kept per size-class in central cache
2311//-------------------------------------------------------------------
2312
2313class TCMalloc_Central_FreeList {
2314 public:
2315 void Init(size_t cl);
2316
2317 // These methods all do internal locking.
2318
2319 // Insert the specified range into the central freelist. N is the number of
2320 // elements in the range.
2321 void InsertRange(void *start, void *end, int N);
2322
2323 // Returns the actual number of fetched elements into N.
2324 void RemoveRange(void **start, void **end, int *N);
2325
2326 // Returns the number of free objects in cache.
2327 size_t length() {
2328 SpinLockHolder h(&lock_);
2329 return counter_;
2330 }
2331
2332 // Returns the number of free objects in the transfer cache.
2333 int tc_length() {
2334 SpinLockHolder h(&lock_);
2335 return used_slots_ * num_objects_to_move[size_class_];
2336 }
2337
2338#ifdef WTF_CHANGES
2339 template <class Finder, class Reader>
2340 void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
2341 {
2342 for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0))
2343 ASSERT(!span->objects);
2344
2345 ASSERT(!nonempty_.objects);
2346 static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);
2347
2348 Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
2349 Span* remoteSpan = nonempty_.next;
2350
2351 for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) {
2352 for (void* nextObject = span->objects; nextObject; nextObject = reader.nextEntryInLinkedList(reinterpret_cast<void**>(nextObject)))
2353 finder.visit(nextObject);
2354 }
2355 }
2356#endif
2357
2358 private:
2359 // REQUIRES: lock_ is held
2360 // Remove object from cache and return.
2361 // Return NULL if no free entries in cache.
2362 void* FetchFromSpans();
2363
2364 // REQUIRES: lock_ is held
2365 // Remove object from cache and return. Fetches
2366 // from pageheap if cache is empty. Only returns
2367 // NULL on allocation failure.
2368 void* FetchFromSpansSafe();
2369
2370 // REQUIRES: lock_ is held
2371 // Release a linked list of objects to spans.
2372 // May temporarily release lock_.
2373 void ReleaseListToSpans(void *start);
2374
2375 // REQUIRES: lock_ is held
2376 // Release an object to spans.
2377 // May temporarily release lock_.
2378 ALWAYS_INLINE void ReleaseToSpans(void* object);
2379
2380 // REQUIRES: lock_ is held
2381 // Populate cache by fetching from the page heap.
2382 // May temporarily release lock_.
2383 ALWAYS_INLINE void Populate();
2384
2385 // REQUIRES: lock is held.
2386 // Tries to make room for a TCEntry. If the cache is full it will try to
2387 // expand it at the cost of some other cache size. Return false if there is
2388 // no space.
2389 bool MakeCacheSpace();
2390
2391 // REQUIRES: lock_ for locked_size_class is held.
2392 // Picks a "random" size class to steal TCEntry slot from. In reality it
2393 // just iterates over the sizeclasses but does so without taking a lock.
2394 // Returns true on success.
2395 // May temporarily lock a "random" size class.
2396 static ALWAYS_INLINE bool EvictRandomSizeClass(size_t locked_size_class, bool force);
2397
2398 // REQUIRES: lock_ is *not* held.
2399 // Tries to shrink the cache. If force is true it will release objects to
2400 // spans if that allows it to shrink the cache. Return false if it failed to
2401 // shrink the cache. Decrements cache_size_ on success.
2402 // May temporarily take lock_. If it takes lock_, the locked_size_class
2403 // lock is released first, to keep this thread from holding two size class
2404 // locks concurrently, which could lead to a deadlock.
2405 bool ShrinkCache(int locked_size_class, bool force);
2406
2407 // This lock protects all the data members. cached_entries and cache_size_
2408 // may be looked at without holding the lock.
2409 SpinLock lock_;
2410
2411 // We keep linked lists of empty and non-empty spans.
2412 size_t size_class_; // My size class
2413 Span empty_; // Dummy header for list of empty spans
2414 Span nonempty_; // Dummy header for list of non-empty spans
2415 size_t counter_; // Number of free objects in cache entry
2416
2417 // Here we reserve space for TCEntry cache slots. Since one size class can
2418 // end up getting all the TCEntries quota in the system we just preallocate
2419 // sufficient number of entries here.
2420 TCEntry tc_slots_[kNumTransferEntries];
2421
2422 // Number of currently used cached entries in tc_slots_. This variable is
2423 // updated under a lock but can be read without one.
2424 int32_t used_slots_;
2425 // The current number of slots for this size class. This is an
2426 // adaptive value that is increased if there is lots of traffic
2427 // on a given size class.
2428 int32_t cache_size_;
2429};
2430
2431// Pad each CentralCache object to a multiple of 64 bytes
2432class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
2433 private:
2434 char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
2435};
2436
2437//-------------------------------------------------------------------
2438// Global variables
2439//-------------------------------------------------------------------
2440
2441// Central cache -- a collection of free-lists, one per size-class.
2442// We have a separate lock per free-list to reduce contention.
2443static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
2444
2445// Page-level allocator
2446static AllocAlignmentInteger pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(AllocAlignmentInteger) - 1) / sizeof(AllocAlignmentInteger)];
2447static bool phinited = false;
2448
2449// Avoid extra level of indirection by making "pageheap" be just an alias
2450// of pageheap_memory.
2451typedef union {
2452 void* m_memory;
2453 TCMalloc_PageHeap* m_pageHeap;
2454} PageHeapUnion;
2455
2456static inline TCMalloc_PageHeap* getPageHeap()
2457{
2458 PageHeapUnion u = { &pageheap_memory[0] };
2459 return u.m_pageHeap;
2460}
2461
2462#define pageheap getPageHeap()
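// The PageHeapUnion above exists so the statically reserved pageheap_memory
// buffer can be viewed as a TCMalloc_PageHeap* without running a static
// constructor; going through a union rather than a bare
// reinterpret_cast<TCMalloc_PageHeap*>(pageheap_memory) also sidesteps
// type-punning warnings on some compilers. The "pageheap" macro then makes every
// use read like an ordinary pointer.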
2463
2464#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2465
2466#if HAVE(DISPATCH_H) || OS(WINDOWS)
2467
2468void TCMalloc_PageHeap::periodicScavenge()
2469{
2470 SpinLockHolder h(&pageheap_lock);
2471 pageheap->scavenge();
2472
2473 if (shouldScavenge()) {
2474 rescheduleScavenger();
2475 return;
2476 }
2477
2478 suspendScavenger();
2479}
2480
2481ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
2482{
2483 ASSERT(pageheap_lock.IsHeld());
2484 if (isScavengerSuspended() && shouldScavenge())
2485 scheduleScavenger();
2486}
2487
2488#else
2489
2490void TCMalloc_PageHeap::scavengerThread()
2491{
2492#if HAVE(PTHREAD_SETNAME_NP)
2493 pthread_setname_np("JavaScriptCore: FastMalloc scavenger");
2494#endif
2495
2496 while (1) {
2497 if (!shouldScavenge()) {
2498 pthread_mutex_lock(&m_scavengeMutex);
2499 m_scavengeThreadActive = false;
2500 // Block until there are enough free committed pages to release back to the system.
2501 pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
2502 m_scavengeThreadActive = true;
2503 pthread_mutex_unlock(&m_scavengeMutex);
2504 }
2505 sleep(kScavengeDelayInSeconds);
2506 {
2507 SpinLockHolder h(&pageheap_lock);
2508 pageheap->scavenge();
2509 }
2510 }
2511}
2512
2513#endif
2514
2515#endif
2516
2517// If TLS is available, we also store a copy
2518// of the per-thread object in a __thread variable
2519// since __thread variables are faster to read
2520// than pthread_getspecific(). We still need
2521// pthread_setspecific() because __thread
2522// variables provide no way to run cleanup
2523// code when a thread is destroyed.
2524#ifdef HAVE_TLS
2525static __thread TCMalloc_ThreadCache *threadlocal_heap;
2526#endif
2527// Thread-specific key. Initialization here is somewhat tricky
2528// because some Linux startup code invokes malloc() before it
2529// is in a good enough state to handle pthread_keycreate().
2530// Therefore, we use TSD keys only after tsd_inited is set to true.
2531// Until then, we use a slow path to get the heap object.
2532static bool tsd_inited = false;
2533#if USE(PTHREAD_GETSPECIFIC_DIRECT)
2534static const pthread_key_t heap_key = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0;
2535#else
2536static pthread_key_t heap_key;
2537#endif
2538#if OS(WINDOWS)
2539DWORD tlsIndex = TLS_OUT_OF_INDEXES;
2540#endif
2541
2542static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
2543{
2544#if USE(PTHREAD_GETSPECIFIC_DIRECT)
2545 // Can't have two libraries both doing this in the same process,
2546 // so check and make this crash right away.
2547 if (pthread_getspecific(heap_key))
2548 CRASH();
2549#endif
2550
2551 // Still do pthread_setspecific even if there's an alternate form
2552 // of thread-local storage in use, to benefit from the delete callback.
2553 pthread_setspecific(heap_key, heap);
2554
2555#if OS(WINDOWS)
2556 TlsSetValue(tlsIndex, heap);
2557#endif
2558}
2559
2560// Allocator for thread heaps
2561static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;
2562
2563// Linked list of heap objects. Protected by pageheap_lock.
2564static TCMalloc_ThreadCache* thread_heaps = NULL;
2565static int thread_heap_count = 0;
2566
2567// Overall thread cache size. Protected by pageheap_lock.
2568static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;
2569
2570// Global per-thread cache size. Writes are protected by
2571// pageheap_lock. Reads are done without any locking, which should be
2572// fine as long as size_t can be written atomically and we don't place
2573// invariants between this variable and other pieces of state.
2574static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
2575
2576//-------------------------------------------------------------------
2577// Central cache implementation
2578//-------------------------------------------------------------------
2579
2580void TCMalloc_Central_FreeList::Init(size_t cl) {
2581 lock_.Init();
2582 size_class_ = cl;
2583 DLL_Init(&empty_);
2584 DLL_Init(&nonempty_);
2585 counter_ = 0;
2586
2587 cache_size_ = 1;
2588 used_slots_ = 0;
2589 ASSERT(cache_size_ <= kNumTransferEntries);
2590}
2591
2592void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
2593 while (start) {
2594 void *next = SLL_Next(start);
2595 ReleaseToSpans(start);
2596 start = next;
2597 }
2598}
2599
2600ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
2601 const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
2602 Span* span = pageheap->GetDescriptor(p);
2603 ASSERT(span != NULL);
2604 ASSERT(span->refcount > 0);
2605
2606 // If span is empty, move it to non-empty list
2607 if (span->objects == NULL) {
2608 DLL_Remove(span);
2609 DLL_Prepend(&nonempty_, span);
2610 Event(span, 'N', 0);
2611 }
2612
2613 // The following check is expensive, so it is disabled by default
2614 if (false) {
2615 // Check that object does not occur in list
2616 unsigned got = 0;
2617 for (void* p = span->objects; p != NULL; p = *((void**) p)) {
2618 ASSERT(p != object);
2619 got++;
2620 }
2621 ASSERT(got + span->refcount ==
2622 (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
2623 }
2624
2625 counter_++;
2626 span->refcount--;
2627 if (span->refcount == 0) {
2628 Event(span, '#', 0);
2629 counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
2630 DLL_Remove(span);
2631
2632 // Release central list lock while operating on pageheap
2633 lock_.Unlock();
2634 {
2635 SpinLockHolder h(&pageheap_lock);
2636 pageheap->Delete(span);
2637 }
2638 lock_.Lock();
2639 } else {
2640 *(reinterpret_cast<void**>(object)) = span->objects;
2641 span->objects = object;
2642 }
2643}
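// The pointer-to-span lookup used above appears throughout the central cache
// (sketch, same names as the surrounding code):
//
//     const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
//     Span* span = pageheap->GetDescriptor(p);
//
// When span->refcount drops to zero every object in the span is free again, so
// the span is handed back to the page heap; note that lock_ is released before
// taking pageheap_lock to keep the lock order consistent.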
2644
2645ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
2646 size_t locked_size_class, bool force) {
2647 static int race_counter = 0;
2648 int t = race_counter++; // Updated without a lock, but who cares.
2649 if (t >= static_cast<int>(kNumClasses)) {
2650 while (t >= static_cast<int>(kNumClasses)) {
2651 t -= kNumClasses;
2652 }
2653 race_counter = t;
2654 }
2655 ASSERT(t >= 0);
2656 ASSERT(t < static_cast<int>(kNumClasses));
2657 if (t == static_cast<int>(locked_size_class)) return false;
2658 return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
2659}
2660
2661bool TCMalloc_Central_FreeList::MakeCacheSpace() {
2662 // Is there room in the cache?
2663 if (used_slots_ < cache_size_) return true;
2664 // Check if we can expand this cache?
2665 if (cache_size_ == kNumTransferEntries) return false;
2666 // Ok, we'll try to grab an entry from some other size class.
2667 if (EvictRandomSizeClass(size_class_, false) ||
2668 EvictRandomSizeClass(size_class_, true)) {
2669 // Succeeded in evicting, we're going to make our cache larger.
2670 cache_size_++;
2671 return true;
2672 }
2673 return false;
2674}
2675
2676
2677namespace {
2678class LockInverter {
2679 private:
2680 SpinLock *held_, *temp_;
2681 public:
2682 inline explicit LockInverter(SpinLock* held, SpinLock *temp)
2683 : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
2684 inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
2685};
2686}
2687
2688bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
2689 // Start with a quick check without taking a lock.
2690 if (cache_size_ == 0) return false;
2691 // We don't evict from a full cache unless we are 'forcing'.
2692 if (force == false && used_slots_ == cache_size_) return false;
2693
2694 // Grab lock, but first release the other lock held by this thread. We use
2695 // the lock inverter to ensure that we never hold two size class locks
2696 // concurrently. That can create a deadlock because there is no well
2697 // defined nesting order.
2698 LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
2699 ASSERT(used_slots_ <= cache_size_);
2700 ASSERT(0 <= cache_size_);
2701 if (cache_size_ == 0) return false;
2702 if (used_slots_ == cache_size_) {
2703 if (force == false) return false;
2704 // ReleaseListToSpans releases the lock, so we have to make all the
2705 // updates to the central list before calling it.
2706 cache_size_--;
2707 used_slots_--;
2708 ReleaseListToSpans(tc_slots_[used_slots_].head);
2709 return true;
2710 }
2711 cache_size_--;
2712 return true;
2713}
2714
2715void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
2716 SpinLockHolder h(&lock_);
2717 if (N == num_objects_to_move[size_class_] &&
2718 MakeCacheSpace()) {
2719 int slot = used_slots_++;
2720 ASSERT(slot >=0);
2721 ASSERT(slot < kNumTransferEntries);
2722 TCEntry *entry = &tc_slots_[slot];
2723 entry->head = start;
2724 entry->tail = end;
2725 return;
2726 }
2727 ReleaseListToSpans(start);
2728}
2729
2730void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
2731 int num = *N;
2732 ASSERT(num > 0);
2733
2734 SpinLockHolder h(&lock_);
2735 if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
2736 int slot = --used_slots_;
2737 ASSERT(slot >= 0);
2738 TCEntry *entry = &tc_slots_[slot];
2739 *start = entry->head;
2740 *end = entry->tail;
2741 return;
2742 }
2743
2744 // TODO: Prefetch multiple TCEntries?
2745 void *tail = FetchFromSpansSafe();
2746 if (!tail) {
2747 // We are completely out of memory.
2748 *start = *end = NULL;
2749 *N = 0;
2750 return;
2751 }
2752
2753 SLL_SetNext(tail, NULL);
2754 void *head = tail;
2755 int count = 1;
2756 while (count < num) {
2757 void *t = FetchFromSpans();
2758 if (!t) break;
2759 SLL_Push(&head, t);
2760 count++;
2761 }
2762 *start = head;
2763 *end = tail;
2764 *N = count;
2765}
2766
2767
2768void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
2769 void *t = FetchFromSpans();
2770 if (!t) {
2771 Populate();
2772 t = FetchFromSpans();
2773 }
2774 return t;
2775}
2776
2777void* TCMalloc_Central_FreeList::FetchFromSpans() {
2778 if (DLL_IsEmpty(&nonempty_)) return NULL;
2779 Span* span = nonempty_.next;
2780
2781 ASSERT(span->objects != NULL);
2782 ASSERT_SPAN_COMMITTED(span);
2783 span->refcount++;
2784 void* result = span->objects;
2785 span->objects = *(reinterpret_cast<void**>(result));
2786 if (span->objects == NULL) {
2787 // Move to empty list
2788 DLL_Remove(span);
2789 DLL_Prepend(&empty_, span);
2790 Event(span, 'E', 0);
2791 }
2792 counter_--;
2793 return result;
2794}
2795
2796// Fetch memory from the system and add to the central cache freelist.
2797ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
2798 // Release central list lock while operating on pageheap
2799 lock_.Unlock();
2800 const size_t npages = class_to_pages[size_class_];
2801
2802 Span* span;
2803 {
2804 SpinLockHolder h(&pageheap_lock);
2805 span = pageheap->New(npages);
2806 if (span) pageheap->RegisterSizeClass(span, size_class_);
2807 }
2808 if (span == NULL) {
2809#if HAVE(ERRNO_H)
2810 MESSAGE("allocation failed: %d\n", errno);
2811#elif OS(WINDOWS)
2812 MESSAGE("allocation failed: %d\n", ::GetLastError());
2813#else
2814 MESSAGE("allocation failed\n");
2815#endif
2816 lock_.Lock();
2817 return;
2818 }
2819 ASSERT_SPAN_COMMITTED(span);
2820 ASSERT(span->length == npages);
2821 // Cache sizeclass info eagerly. Locking is not necessary.
2822 // (Instead of being eager, we could just replace any stale info
2823 // about this span, but that seems to be no better in practice.)
2824 for (size_t i = 0; i < npages; i++) {
2825 pageheap->CacheSizeClass(span->start + i, size_class_);
2826 }
2827
2828 // Split the block into pieces and add to the free-list
2829 // TODO: coloring of objects to avoid cache conflicts?
2830 void** tail = &span->objects;
2831 char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
2832 char* limit = ptr + (npages << kPageShift);
2833 const size_t size = ByteSizeForClass(size_class_);
2834 int num = 0;
2835 char* nptr;
2836 while ((nptr = ptr + size) <= limit) {
2837 *tail = ptr;
2838 tail = reinterpret_cast_ptr<void**>(ptr);
2839 ptr = nptr;
2840 num++;
2841 }
2842 ASSERT(ptr <= limit);
2843 *tail = NULL;
2844 span->refcount = 0; // No sub-object in use yet
2845
2846 // Add span to list of non-empty spans
2847 lock_.Lock();
2848 DLL_Prepend(&nonempty_, span);
2849 counter_ += num;
2850}
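// Worked example of the carving loop above (illustrative, assuming kPageShift = 12,
// npages = 1 and ByteSizeForClass(size_class_) = 256): the 4096-byte span is cut
// into 4096 / 256 = 16 objects. The first word of each object is overwritten with
// a pointer to the next one, yielding the same singly linked layout that SLL_Push
// and SLL_Pop expect, and *tail = NULL terminates the chain.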
2851
2852//-------------------------------------------------------------------
2853// TCMalloc_ThreadCache implementation
2854//-------------------------------------------------------------------
2855
2856inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
2857 if (bytes_until_sample_ < k) {
2858 PickNextSample(k);
2859 return true;
2860 } else {
2861 bytes_until_sample_ -= k;
2862 return false;
2863 }
2864}
2865
2866void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
2867 size_ = 0;
2868 next_ = NULL;
2869 prev_ = NULL;
2870 tid_ = tid;
2871 in_setspecific_ = false;
2872 for (size_t cl = 0; cl < kNumClasses; ++cl) {
2873 list_[cl].Init();
2874 }
2875
2876 // Initialize RNG -- run it for a bit to get to good values
2877 bytes_until_sample_ = 0;
2878 rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
2879 for (int i = 0; i < 100; i++) {
2880 PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
2881 }
2882}
2883
2884void TCMalloc_ThreadCache::Cleanup() {
2885 // Put unused memory back into central cache
2886 for (size_t cl = 0; cl < kNumClasses; ++cl) {
2887 if (list_[cl].length() > 0) {
2888 ReleaseToCentralCache(cl, list_[cl].length());
2889 }
2890 }
2891}
2892
2893ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
2894 ASSERT(size <= kMaxSize);
2895 const size_t cl = SizeClass(size);
2896 FreeList* list = &list_[cl];
2897 size_t allocationSize = ByteSizeForClass(cl);
2898 if (list->empty()) {
2899 FetchFromCentralCache(cl, allocationSize);
2900 if (list->empty()) return NULL;
2901 }
2902 size_ -= allocationSize;
2903 return list->Pop();
2904}
2905
2906inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
2907 size_ += ByteSizeForClass(cl);
2908 FreeList* list = &list_[cl];
2909 list->Push(ptr);
2910 // If enough data is free, put back into central cache
2911 if (list->length() > kMaxFreeListLength) {
2912 ReleaseToCentralCache(cl, num_objects_to_move[cl]);
2913 }
2914 if (size_ >= per_thread_cache_size) Scavenge();
2915}
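// Fast-path summary (illustrative): Allocate() and Deallocate() touch only this
// thread's free list for the object's size class, so no lock is taken. The slow
// paths are list->empty() (refill via FetchFromCentralCache) and either
// list->length() > kMaxFreeListLength or size_ >= per_thread_cache_size (push
// objects back via ReleaseToCentralCache / Scavenge), which is where the central
// cache spin locks come in.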
2916
2917// Remove some objects of class "cl" from central cache and add to thread heap
2918ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
2919 int fetch_count = num_objects_to_move[cl];
2920 void *start, *end;
2921 central_cache[cl].RemoveRange(&start, &end, &fetch_count);
2922 list_[cl].PushRange(fetch_count, start, end);
2923 size_ += allocationSize * fetch_count;
2924}
2925
2926// Remove some objects of class "cl" from thread heap and add to central cache
2927inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
2928 ASSERT(N > 0);
2929 FreeList* src = &list_[cl];
2930 if (N > src->length()) N = src->length();
2931 size_ -= N*ByteSizeForClass(cl);
2932
2933 // We return prepackaged chains of the correct size to the central cache.
2934 // TODO: Use the same format internally in the thread caches?
2935 int batch_size = num_objects_to_move[cl];
2936 while (N > batch_size) {
2937 void *tail, *head;
2938 src->PopRange(batch_size, &head, &tail);
2939 central_cache[cl].InsertRange(head, tail, batch_size);
2940 N -= batch_size;
2941 }
2942 void *tail, *head;
2943 src->PopRange(N, &head, &tail);
2944 central_cache[cl].InsertRange(head, tail, N);
2945}
2946
2947// Release idle memory to the central cache
2948inline void TCMalloc_ThreadCache::Scavenge() {
2949 // If the low-water mark for the free list is L, it means we would
2950 // not have had to allocate anything from the central cache even if
2951 // we had reduced the free list size by L. We aim to get closer to
2952 // that situation by dropping L/2 nodes from the free list. This
2953 // may not release much memory, but if so we will call scavenge again
2954 // pretty soon and the low-water marks will be high on that call.
2955 //int64 start = CycleClock::Now();
2956
2957 for (size_t cl = 0; cl < kNumClasses; cl++) {
2958 FreeList* list = &list_[cl];
2959 const int lowmark = list->lowwatermark();
2960 if (lowmark > 0) {
2961 const int drop = (lowmark > 1) ? lowmark/2 : 1;
2962 ReleaseToCentralCache(cl, drop);
2963 }
2964 list->clear_lowwatermark();
2965 }
2966
2967 //int64 finish = CycleClock::Now();
2968 //CycleTimer ct;
2969 //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
2970}
2971
2972void TCMalloc_ThreadCache::PickNextSample(size_t k) {
2973 // Make next "random" number
2974 // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
2975 static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
2976 uint32_t r = rnd_;
2977 rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
2978
2979 // Next point is "rnd_ % (sample_period)". I.e., average
2980 // increment is "sample_period/2".
2981 const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
2982 static int last_flag_value = -1;
2983
2984 if (flag_value != last_flag_value) {
2985 SpinLockHolder h(&sample_period_lock);
2986 int i;
2987 for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
2988 if (primes_list[i] >= flag_value) {
2989 break;
2990 }
2991 }
2992 sample_period = primes_list[i];
2993 last_flag_value = flag_value;
2994 }
2995
2996 bytes_until_sample_ += rnd_ % sample_period;
2997
2998 if (k > (static_cast<size_t>(-1) >> 2)) {
2999 // If the user has asked for a huge allocation then it is possible
3000 // for the code below to loop infinitely. Just return (note that
3001 // this throws off the sampling accuracy somewhat, but a user who
3002 // is allocating more than 1G of memory at a time can live with a
3003 // minor inaccuracy in profiling of small allocations, and also
3004 // would rather not wait for the loop below to terminate).
3005 return;
3006 }
3007
3008 while (bytes_until_sample_ < k) {
3009 // Increase bytes_until_sample_ by enough average sampling periods
3010 // (sample_period >> 1) to allow us to sample past the current
3011 // allocation.
3012 bytes_until_sample_ += (sample_period >> 1);
3013 }
3014
3015 bytes_until_sample_ -= k;
3016}
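// The generator above is a 32-bit LFSR stepped as rnd_ = (r << 1) ^ (kPoly when
// the old high bit was set). One illustrative step, assuming rnd_ = 0x80000001:
//
//     r << 1                          = 0x00000002
//     (int32_t(r) >> 31) & kPoly      = kPoly        // sign bit was set
//     new rnd_                        = 0x00000002 ^ kPoly
//
// The result, taken modulo sample_period, decides how many bytes may be allocated
// before the next sampled allocation.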
3017
3018void TCMalloc_ThreadCache::InitModule() {
3019 // There is a slight potential race here because of the double-checked
3020 // locking idiom. However, as long as the program does a small
3021 // allocation before switching to multi-threaded mode, we will be
3022 // fine. We increase the chances of doing such a small allocation
3023 // by doing one in the constructor of the module_enter_exit_hook
3024 // object declared below.
3025 SpinLockHolder h(&pageheap_lock);
3026 if (!phinited) {
3027#ifdef WTF_CHANGES
3028 InitTSD();
3029#endif
3030 InitSizeClasses();
3031 threadheap_allocator.Init();
3032 span_allocator.Init();
3033 span_allocator.New(); // Reduce cache conflicts
3034 span_allocator.New(); // Reduce cache conflicts
3035 stacktrace_allocator.Init();
3036 DLL_Init(&sampled_objects);
3037 for (size_t i = 0; i < kNumClasses; ++i) {
3038 central_cache[i].Init(i);
3039 }
3040 pageheap->init();
3041 phinited = 1;
3042#if defined(WTF_CHANGES) && OS(DARWIN)
3043 FastMallocZone::init();
3044#endif
3045 }
3046}
3047
3048inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
3049 // Create the heap and add it to the linked list
3050 TCMalloc_ThreadCache *heap = threadheap_allocator.New();
3051 heap->Init(tid);
3052 heap->next_ = thread_heaps;
3053 heap->prev_ = NULL;
3054 if (thread_heaps != NULL) thread_heaps->prev_ = heap;
3055 thread_heaps = heap;
3056 thread_heap_count++;
3057 RecomputeThreadCacheSize();
3058 return heap;
3059}
3060
3061inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
3062#ifdef HAVE_TLS
3063 // __thread is faster, but only when the kernel supports it
3064 if (KernelSupportsTLS())
3065 return threadlocal_heap;
3066#elif OS(WINDOWS)
3067 return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
3068#else
3069 return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
3070#endif
3071}
3072
3073inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
3074 TCMalloc_ThreadCache* ptr = NULL;
3075 if (!tsd_inited) {
3076 InitModule();
3077 } else {
3078 ptr = GetThreadHeap();
3079 }
3080 if (ptr == NULL) ptr = CreateCacheIfNecessary();
3081 return ptr;
3082}
3083
3084// In deletion paths, we do not try to create a thread-cache. This is
3085// because we may be in the thread destruction code and may have
3086// already cleaned up the cache for this thread.
3087inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
3088 if (!tsd_inited) return NULL;
3089 void* const p = GetThreadHeap();
3090 return reinterpret_cast<TCMalloc_ThreadCache*>(p);
3091}
3092
3093void TCMalloc_ThreadCache::InitTSD() {
3094 ASSERT(!tsd_inited);
3095#if USE(PTHREAD_GETSPECIFIC_DIRECT)
3096 pthread_key_init_np(heap_key, DestroyThreadCache);
3097#else
b37bf2e1 3098 pthread_key_create(&heap_key, DestroyThreadCache);
3099#endif
3100#if OS(WINDOWS)
3101 tlsIndex = TlsAlloc();
3102#endif
3103 tsd_inited = true;
3104
3105#if !OS(WINDOWS)
3106 // We may have used a fake pthread_t for the main thread. Fix it.
3107 pthread_t zero;
3108 memset(&zero, 0, sizeof(zero));
3109#endif
3110#ifndef WTF_CHANGES
3111 SpinLockHolder h(&pageheap_lock);
3112#else
3113 ASSERT(pageheap_lock.IsHeld());
3114#endif
3115 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
3116#if OS(WINDOWS)
3117 if (h->tid_ == 0) {
3118 h->tid_ = GetCurrentThreadId();
3119 }
3120#else
3121 if (pthread_equal(h->tid_, zero)) {
3122 h->tid_ = pthread_self();
3123 }
3124#endif
3125 }
3126}
3127
3128TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
3129 // Initialize per-thread data if necessary
3130 TCMalloc_ThreadCache* heap = NULL;
3131 {
3132 SpinLockHolder h(&pageheap_lock);
3133
3134#if OS(WINDOWS)
3135 DWORD me;
3136 if (!tsd_inited) {
3137 me = 0;
3138 } else {
3139 me = GetCurrentThreadId();
3140 }
3141#else
3142 // Early on in glibc's life, we cannot even call pthread_self()
3143 pthread_t me;
3144 if (!tsd_inited) {
3145 memset(&me, 0, sizeof(me));
3146 } else {
3147 me = pthread_self();
3148 }
3149#endif
3150
3151 // This may be a recursive malloc call from pthread_setspecific()
3152 // In that case, the heap for this thread has already been created
3153 // and added to the linked list. So we search for that first.
3154 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
3155#if OS(WINDOWS)
3156 if (h->tid_ == me) {
3157#else
3158 if (pthread_equal(h->tid_, me)) {
3159#endif
3160 heap = h;
3161 break;
3162 }
3163 }
3164
3165 if (heap == NULL) heap = NewHeap(me);
3166 }
3167
3168 // We call pthread_setspecific() outside the lock because it may
3169 // call malloc() recursively. The recursive call will never get
3170 // here again because it will find the already allocated heap in the
3171 // linked list of heaps.
3172 if (!heap->in_setspecific_ && tsd_inited) {
3173 heap->in_setspecific_ = true;
3174 setThreadHeap(heap);
3175 }
3176 return heap;
3177}
3178
3179void TCMalloc_ThreadCache::BecomeIdle() {
3180 if (!tsd_inited) return; // No caches yet
3181 TCMalloc_ThreadCache* heap = GetThreadHeap();
3182 if (heap == NULL) return; // No thread cache to remove
3183 if (heap->in_setspecific_) return; // Do not disturb the active caller
3184
3185 heap->in_setspecific_ = true;
3186 setThreadHeap(NULL);
3187#ifdef HAVE_TLS
3188 // Also update the copy in __thread
3189 threadlocal_heap = NULL;
3190#endif
3191 heap->in_setspecific_ = false;
3192 if (GetThreadHeap() == heap) {
3193 // Somehow heap got reinstated by a recursive call to malloc
3194 // from pthread_setspecific. We give up in this case.
3195 return;
3196 }
3197
3198 // We can now get rid of the heap
3199 DeleteCache(heap);
3200}
3201
3202void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
3203 // Note that "ptr" cannot be NULL since pthread promises not
3204 // to invoke the destructor on NULL values, but for safety,
3205 // we check anyway.
3206 if (ptr == NULL) return;
3207#ifdef HAVE_TLS
3208 // Prevent fast path of GetThreadHeap() from returning heap.
3209 threadlocal_heap = NULL;
3210#endif
3211 DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
3212}
3213
3214void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
3215 // Remove all memory from heap
3216 heap->Cleanup();
3217
3218 // Remove from linked list
3219 SpinLockHolder h(&pageheap_lock);
3220 if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
3221 if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
3222 if (thread_heaps == heap) thread_heaps = heap->next_;
3223 thread_heap_count--;
3224 RecomputeThreadCacheSize();
3225
3226 threadheap_allocator.Delete(heap);
3227}
3228
3229void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
3230 // Divide available space across threads
3231 int n = thread_heap_count > 0 ? thread_heap_count : 1;
3232 size_t space = overall_thread_cache_size / n;
3233
3234 // Limit to allowed range
3235 if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
3236 if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;
3237
3238 per_thread_cache_size = space;
3239}
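// Worked example (illustrative numbers only): with overall_thread_cache_size of
// 16MB and 8 live thread heaps, each thread is budgeted 16MB / 8 = 2MB, which is
// then clamped into [kMinThreadCacheSize, kMaxThreadCacheSize]. Only the
// divide-and-clamp behaviour comes from the code above; the byte values are
// assumptions.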
3240
3241void TCMalloc_ThreadCache::Print() const {
3242 for (size_t cl = 0; cl < kNumClasses; ++cl) {
3243 MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n",
3244 ByteSizeForClass(cl),
3245 list_[cl].length(),
3246 list_[cl].lowwatermark());
3247 }
3248}
3249
3250// Extract interesting stats
3251struct TCMallocStats {
3252 uint64_t system_bytes; // Bytes alloced from system
3253 uint64_t thread_bytes; // Bytes in thread caches
3254 uint64_t central_bytes; // Bytes in central cache
3255 uint64_t transfer_bytes; // Bytes in central transfer cache
3256 uint64_t pageheap_bytes; // Bytes in page heap
3257 uint64_t metadata_bytes; // Bytes alloced for metadata
3258};
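// How these fields combine (mirrors the arithmetic in DumpStats() below): the
// bytes actually in use by the application are approximately
//
//     system_bytes - pageheap_bytes - central_bytes - transfer_bytes - thread_bytes
//
// i.e. everything obtained from the system minus every flavour of cached free
// memory (metadata_bytes is tracked separately).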
3259
3260#ifndef WTF_CHANGES
3261// Get stats into "r". Also get per-size-class counts if class_count != NULL
3262static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
3263 r->central_bytes = 0;
3264 r->transfer_bytes = 0;
3265 for (int cl = 0; cl < kNumClasses; ++cl) {
3266 const int length = central_cache[cl].length();
3267 const int tc_length = central_cache[cl].tc_length();
3268 r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
3269 r->transfer_bytes +=
3270 static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
3271 if (class_count) class_count[cl] = length + tc_length;
3272 }
3273
3274 // Add stats from per-thread heaps
3275 r->thread_bytes = 0;
3276 { // scope
3277 SpinLockHolder h(&pageheap_lock);
3278 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
3279 r->thread_bytes += h->Size();
3280 if (class_count) {
3281 for (size_t cl = 0; cl < kNumClasses; ++cl) {
3282 class_count[cl] += h->freelist_length(cl);
3283 }
3284 }
3285 }
3286 }
3287
3288 { //scope
3289 SpinLockHolder h(&pageheap_lock);
3290 r->system_bytes = pageheap->SystemBytes();
3291 r->metadata_bytes = metadata_system_bytes;
3292 r->pageheap_bytes = pageheap->FreeBytes();
3293 }
3294}
3295#endif
3296
3297#ifndef WTF_CHANGES
3298// WRITE stats to "out"
3299static void DumpStats(TCMalloc_Printer* out, int level) {
3300 TCMallocStats stats;
3301 uint64_t class_count[kNumClasses];
3302 ExtractStats(&stats, (level >= 2 ? class_count : NULL));
3303
3304 if (level >= 2) {
3305 out->printf("------------------------------------------------\n");
3306 uint64_t cumulative = 0;
3307 for (int cl = 0; cl < kNumClasses; ++cl) {
3308 if (class_count[cl] > 0) {
3309 uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
3310 cumulative += class_bytes;
3311 out->printf("class %3d [ %8" PRIuS " bytes ] : "
3312 "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
3313 cl, ByteSizeForClass(cl),
3314 class_count[cl],
3315 class_bytes / 1048576.0,
3316 cumulative / 1048576.0);
3317 }
3318 }
3319
3320 SpinLockHolder h(&pageheap_lock);
3321 pageheap->Dump(out);
3322 }
3323
3324 const uint64_t bytes_in_use = stats.system_bytes
3325 - stats.pageheap_bytes
3326 - stats.central_bytes
3327 - stats.transfer_bytes
3328 - stats.thread_bytes;
3329
3330 out->printf("------------------------------------------------\n"
3331 "MALLOC: %12" PRIu64 " Heap size\n"
3332 "MALLOC: %12" PRIu64 " Bytes in use by application\n"
3333 "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
3334 "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
3335 "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
3336 "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
3337 "MALLOC: %12" PRIu64 " Spans in use\n"
3338 "MALLOC: %12" PRIu64 " Thread heaps in use\n"
3339 "MALLOC: %12" PRIu64 " Metadata allocated\n"
3340 "------------------------------------------------\n",
3341 stats.system_bytes,
3342 bytes_in_use,
3343 stats.pageheap_bytes,
3344 stats.central_bytes,
3345 stats.transfer_bytes,
3346 stats.thread_bytes,
3347 uint64_t(span_allocator.inuse()),
3348 uint64_t(threadheap_allocator.inuse()),
3349 stats.metadata_bytes);
3350}
3351
3352static void PrintStats(int level) {
3353 const int kBufferSize = 16 << 10;
3354 char* buffer = new char[kBufferSize];
3355 TCMalloc_Printer printer(buffer, kBufferSize);
3356 DumpStats(&printer, level);
3357 write(STDERR_FILENO, buffer, strlen(buffer));
3358 delete[] buffer;
3359}
3360
3361static void** DumpStackTraces() {
3362 // Count how much space we need
3363 int needed_slots = 0;
3364 {
3365 SpinLockHolder h(&pageheap_lock);
3366 for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
3367 StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
3368 needed_slots += 3 + stack->depth;
3369 }
3370 needed_slots += 100; // Slop in case sample grows
3371 needed_slots += needed_slots/8; // An extra 12.5% slop
3372 }
3373
3374 void** result = new void*[needed_slots];
3375 if (result == NULL) {
3376 MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
3377 needed_slots);
3378 return NULL;
3379 }
3380
3381 SpinLockHolder h(&pageheap_lock);
3382 int used_slots = 0;
3383 for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
3384 ASSERT(used_slots < needed_slots); // Need to leave room for terminator
3385 StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
3386 if (used_slots + 3 + stack->depth >= needed_slots) {
3387 // No more room
3388 break;
3389 }
3390
3391 result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
3392 result[used_slots+1] = reinterpret_cast<void*>(stack->size);
3393 result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
3394 for (int d = 0; d < stack->depth; d++) {
3395 result[used_slots+3+d] = stack->stack[d];
3396 }
3397 used_slots += 3 + stack->depth;
3398 }
3399 result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
3400 return result;
3401}
3402#endif
3403
3404#ifndef WTF_CHANGES
3405
3406// TCMalloc's support for extra malloc interfaces
3407class TCMallocImplementation : public MallocExtension {
3408 public:
3409 virtual void GetStats(char* buffer, int buffer_length) {
3410 ASSERT(buffer_length > 0);
3411 TCMalloc_Printer printer(buffer, buffer_length);
3412
3413 // Print level one stats unless lots of space is available
3414 if (buffer_length < 10000) {
3415 DumpStats(&printer, 1);
3416 } else {
3417 DumpStats(&printer, 2);
3418 }
3419 }
3420
3421 virtual void** ReadStackTraces() {
3422 return DumpStackTraces();
3423 }
3424
3425 virtual bool GetNumericProperty(const char* name, size_t* value) {
3426 ASSERT(name != NULL);
3427
3428 if (strcmp(name, "generic.current_allocated_bytes") == 0) {
3429 TCMallocStats stats;
3430 ExtractStats(&stats, NULL);
3431 *value = stats.system_bytes
3432 - stats.thread_bytes
3433 - stats.central_bytes
3434 - stats.pageheap_bytes;
3435 return true;
3436 }
3437
3438 if (strcmp(name, "generic.heap_size") == 0) {
3439 TCMallocStats stats;
3440 ExtractStats(&stats, NULL);
3441 *value = stats.system_bytes;
3442 return true;
3443 }
3444
3445 if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
3446 // We assume that bytes in the page heap are not fragmented too
3447 // badly, and are therefore available for allocation.
3448 SpinLockHolder l(&pageheap_lock);
3449 *value = pageheap->FreeBytes();
3450 return true;
3451 }
3452
3453 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
3454 SpinLockHolder l(&pageheap_lock);
3455 *value = overall_thread_cache_size;
3456 return true;
3457 }
3458
3459 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
3460 TCMallocStats stats;
3461 ExtractStats(&stats, NULL);
3462 *value = stats.thread_bytes;
3463 return true;
3464 }
3465
3466 return false;
3467 }
3468
3469 virtual bool SetNumericProperty(const char* name, size_t value) {
3470 ASSERT(name != NULL);
3471
3472 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
3473 // Clip the value to a reasonable range
3474 if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
3475 if (value > (1<<30)) value = (1<<30); // Limit to 1GB
3476
3477 SpinLockHolder l(&pageheap_lock);
3478 overall_thread_cache_size = static_cast<size_t>(value);
3479 TCMalloc_ThreadCache::RecomputeThreadCacheSize();
3480 return true;
3481 }
3482
3483 return false;
3484 }
3485
3486 virtual void MarkThreadIdle() {
3487 TCMalloc_ThreadCache::BecomeIdle();
3488 }
3489
3490 virtual void ReleaseFreeMemory() {
3491 SpinLockHolder h(&pageheap_lock);
3492 pageheap->ReleaseFreePages();
3493 }
3494};
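// Usage sketch (illustrative): these properties are reached through the
// MallocExtension singleton, e.g.
//
//     size_t heapSize = 0;
//     if (MallocExtension::instance()->GetNumericProperty("generic.heap_size", &heapSize))
//         MESSAGE("heap size: %llu\n", static_cast<unsigned long long>(heapSize));
//     MallocExtension::instance()->SetNumericProperty(
//         "tcmalloc.max_total_thread_cache_bytes", 32 << 20);   // 32MB budget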
3495#endif
3496
3497// The constructor allocates an object to ensure that initialization
3498// runs before main(), and therefore we do not have a chance to become
3499// multi-threaded before initialization. We also create the TSD key
3500// here. Presumably by the time this constructor runs, glibc is in
3501// good enough shape to handle pthread_key_create().
3502//
3503// The constructor also takes the opportunity to tell STL to use
3504// tcmalloc. We want to do this early, before constructors run, so
3505// all user STL allocations go through tcmalloc (which works really
3506// well for STL).
3507//
3508// The destructor prints stats when the program exits.
3509class TCMallocGuard {
3510 public:
3511
3512 TCMallocGuard() {
3513#ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS
3514 // Check whether the kernel also supports TLS (needs to happen at runtime)
3515 CheckIfKernelSupportsTLS();
3516#endif
3517#ifndef WTF_CHANGES
3518#ifdef WIN32 // patch the windows VirtualAlloc, etc.
3519 PatchWindowsFunctions(); // defined in windows/patch_functions.cc
3520#endif
3521#endif
3522 free(malloc(1));
3523 TCMalloc_ThreadCache::InitTSD();
3524 free(malloc(1));
3525#ifndef WTF_CHANGES
3526 MallocExtension::Register(new TCMallocImplementation);
3527#endif
3528 }
3529
3530#ifndef WTF_CHANGES
3531 ~TCMallocGuard() {
3532 const char* env = getenv("MALLOCSTATS");
3533 if (env != NULL) {
3534 int level = atoi(env);
3535 if (level < 1) level = 1;
3536 PrintStats(level);
3537 }
3538#ifdef WIN32
3539 UnpatchWindowsFunctions();
3540#endif
3541 }
3542#endif
3543};
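// Illustrative: when the destructor above is compiled in, per-run statistics can
// be requested from the environment, e.g.
//
//     MALLOCSTATS=2 ./a.out     // level >= 2 adds the per-size-class breakdown
//
// The output is whatever DumpStats() produces, written to stderr at exit.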
3544
3545#ifndef WTF_CHANGES
3546static TCMallocGuard module_enter_exit_hook;
3547#endif
3548
3549
3550//-------------------------------------------------------------------
3551// Helpers for the exported routines below
3552//-------------------------------------------------------------------
3553
3554#ifndef WTF_CHANGES
3555
3556static Span* DoSampledAllocation(size_t size) {
3557
3558 // Grab the stack trace outside the heap lock
3559 StackTrace tmp;
3560 tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
3561 tmp.size = size;
3562
3563 SpinLockHolder h(&pageheap_lock);
3564 // Allocate span
3565 Span *span = pageheap->New(pages(size == 0 ? 1 : size));
3566 if (span == NULL) {
3567 return NULL;
3568 }
3569
3570 // Allocate stack trace
3571 StackTrace *stack = stacktrace_allocator.New();
3572 if (stack == NULL) {
3573 // Sampling failed because of lack of memory
3574 return span;
3575 }
3576
3577 *stack = tmp;
3578 span->sample = 1;
3579 span->objects = stack;
3580 DLL_Prepend(&sampled_objects, span);
3581
3582 return span;
3583}
3584#endif
3585
3586static inline bool CheckCachedSizeClass(void *ptr) {
3587 PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
3588 size_t cached_value = pageheap->GetSizeClassIfCached(p);
3589 return cached_value == 0 ||
3590 cached_value == pageheap->GetDescriptor(p)->sizeclass;
3591}
3592
3593static inline void* CheckedMallocResult(void *result)
3594{
3595 ASSERT(result == 0 || CheckCachedSizeClass(result));
3596 return result;
3597}
3598
3599static inline void* SpanToMallocResult(Span *span) {
9dae56ea 3600 ASSERT_SPAN_COMMITTED(span);
3601 pageheap->CacheSizeClass(span->start, 0);
3602 return
3603 CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
3604}
3605
3606#ifdef WTF_CHANGES
3607template <bool crashOnFailure>
3608#endif
3609static ALWAYS_INLINE void* do_malloc(size_t size) {
3610 void* ret = NULL;
3611
3612#ifdef WTF_CHANGES
3613 ASSERT(!isForbidden());
3614#endif
3615
3616 // The following call forces module initialization
3617 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
3618#ifndef WTF_CHANGES
3619 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
3620 Span* span = DoSampledAllocation(size);
3621 if (span != NULL) {
3622 ret = SpanToMallocResult(span);
3623 }
3624 } else
3625#endif
3626 if (size > kMaxSize) {
3627 // Use page-level allocator
3628 SpinLockHolder h(&pageheap_lock);
3629 Span* span = pageheap->New(pages(size));
3630 if (span != NULL) {
3631 ret = SpanToMallocResult(span);
3632 }
3633 } else {
3634 // The common case, and also the simplest. This just pops the
3635 // size-appropriate freelist, after replenishing it if it's empty.
3636 ret = CheckedMallocResult(heap->Allocate(size));
3637 }
3638 if (!ret) {
3639#ifdef WTF_CHANGES
3640 if (crashOnFailure) // This branch should be optimized out by the compiler.
3641 CRASH();
3642#else
3643 errno = ENOMEM;
3644#endif
3645 }
3646 return ret;
3647}
3648
3649static ALWAYS_INLINE void do_free(void* ptr) {
3650 if (ptr == NULL) return;
3651 ASSERT(pageheap != NULL); // Should not call free() before malloc()
3652 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
3653 Span* span = NULL;
3654 size_t cl = pageheap->GetSizeClassIfCached(p);
3655
3656 if (cl == 0) {
3657 span = pageheap->GetDescriptor(p);
3658 cl = span->sizeclass;
3659 pageheap->CacheSizeClass(p, cl);
3660 }
3661 if (cl != 0) {
9dae56ea 3662#ifndef NO_TCMALLOC_SAMPLES
b37bf2e1 3663 ASSERT(!pageheap->GetDescriptor(p)->sample);
9dae56ea 3664#endif
3665 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
3666 if (heap != NULL) {
3667 heap->Deallocate(ptr, cl);
3668 } else {
3669 // Delete directly into central cache
3670 SLL_SetNext(ptr, NULL);
3671 central_cache[cl].InsertRange(ptr, ptr, 1);
3672 }
3673 } else {
3674 SpinLockHolder h(&pageheap_lock);
3675 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
3676 ASSERT(span != NULL && span->start == p);
9dae56ea 3677#ifndef NO_TCMALLOC_SAMPLES
3678 if (span->sample) {
3679 DLL_Remove(span);
3680 stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
3681 span->objects = NULL;
3682 }
9dae56ea 3683#endif
3684 pageheap->Delete(span);
3685 }
3686}
3687
3688#ifndef WTF_CHANGES
3689// For use by exported routines below that want specific alignments
3690//
3691// Note: this code can be slow, and can significantly fragment memory.
3692// The expectation is that memalign/posix_memalign/valloc/pvalloc will
3693// not be invoked very often. This requirement simplifies our
3694// implementation and allows us to tune for expected allocation
3695// patterns.
3696static void* do_memalign(size_t align, size_t size) {
3697 ASSERT((align & (align - 1)) == 0);
3698 ASSERT(align > 0);
3699 if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
3700
3701 // Allocate at least one byte to avoid boundary conditions below
3702 if (size == 0) size = 1;
3703
3704 if (size <= kMaxSize && align < kPageSize) {
3705 // Search through acceptable size classes looking for one with
3706 // enough alignment. This depends on the fact that
3707 // InitSizeClasses() currently produces several size classes that
3708 // are aligned at powers of two. We will waste time and space if
3709 // we miss in the size class array, but that is deemed acceptable
3710 // since memalign() should be used rarely.
3711 size_t cl = SizeClass(size);
3712 while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
3713 cl++;
3714 }
3715 if (cl < kNumClasses) {
3716 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
3717 return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
3718 }
3719 }
3720
3721 // We will allocate directly from the page heap
3722 SpinLockHolder h(&pageheap_lock);
3723
3724 if (align <= kPageSize) {
3725 // Any page-level allocation will be fine
3726 // TODO: We could put the rest of this page in the appropriate
3727 // TODO: cache but it does not seem worth it.
3728 Span* span = pageheap->New(pages(size));
3729 return span == NULL ? NULL : SpanToMallocResult(span);
3730 }
3731
3732 // Allocate extra pages and carve off an aligned portion
3733 const Length alloc = pages(size + align);
3734 Span* span = pageheap->New(alloc);
3735 if (span == NULL) return NULL;
3736
3737 // Skip starting portion so that we end up aligned
3738 Length skip = 0;
3739 while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
3740 skip++;
3741 }
3742 ASSERT(skip < alloc);
3743 if (skip > 0) {
3744 Span* rest = pageheap->Split(span, skip);
3745 pageheap->Delete(span);
3746 span = rest;
3747 }
3748
3749 // Skip trailing portion that we do not need to return
3750 const Length needed = pages(size);
3751 ASSERT(span->length >= needed);
3752 if (span->length > needed) {
3753 Span* trailer = pageheap->Split(span, needed);
3754 pageheap->Delete(trailer);
3755 }
3756 return SpanToMallocResult(span);
3757}
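// Worked example (illustrative, assuming 4KB pages): do_memalign(16384, 8192)
// skips the size-class search (align >= kPageSize), allocates
// pages(8192 + 16384) = 6 pages, advances `skip` until (span->start + skip) is a
// multiple of 4 pages (so the address is 16KB-aligned), frees the skipped
// prefix, then trims the tail down to pages(8192) = 2 pages before returning.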
3758#endif
3759
3760// Helpers for use by exported routines below:
3761
3762#ifndef WTF_CHANGES
3763static inline void do_malloc_stats() {
3764 PrintStats(1);
3765}
3766#endif
3767
3768static inline int do_mallopt(int, int) {
3769 return 1; // Indicates error
3770}
3771
3772#ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance
3773static inline struct mallinfo do_mallinfo() {
3774 TCMallocStats stats;
3775 ExtractStats(&stats, NULL);
3776
3777 // Just some of the fields are filled in.
3778 struct mallinfo info;
3779 memset(&info, 0, sizeof(info));
3780
3781 // Unfortunately, the struct contains "int" fields, so some of the
3782 // size values will be truncated.
3783 info.arena = static_cast<int>(stats.system_bytes);
3784 info.fsmblks = static_cast<int>(stats.thread_bytes
3785 + stats.central_bytes
3786 + stats.transfer_bytes);
3787 info.fordblks = static_cast<int>(stats.pageheap_bytes);
3788 info.uordblks = static_cast<int>(stats.system_bytes
3789 - stats.thread_bytes
3790 - stats.central_bytes
3791 - stats.transfer_bytes
3792 - stats.pageheap_bytes);
3793
3794 return info;
3795}
3796#endif
3797
3798//-------------------------------------------------------------------
3799// Exported routines
3800//-------------------------------------------------------------------
3801
3802// CAVEAT: The code structure below ensures that MallocHook methods are always
3803// called from the stack frame of the invoked allocation function.
3804// heap-checker.cc depends on this to start a stack trace from
3805// the call to the (de)allocation function.
3806
3807#ifndef WTF_CHANGES
3808extern "C"
3809#else
3810#define do_malloc do_malloc<crashOnFailure>
3811
3812template <bool crashOnFailure>
4e4e5a6f 3813ALWAYS_INLINE void* malloc(size_t);
3814
3815void* fastMalloc(size_t size)
3816{
3817 return malloc<true>(size);
3818}
3819
f9bf01c6 3820TryMallocReturnValue tryFastMalloc(size_t size)
3821{
3822 return malloc<false>(size);
3823}
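// Caller-side sketch (assumes the TryMallocReturnValue helper declared in
// FastMalloc.h, whose getValue() reports whether the allocation succeeded):
//
//     char* buffer;
//     if (!tryFastMalloc(length).getValue(buffer))
//         return false;           // failure path: no CRASH(), caller recovers
//     // ... use buffer ..., then fastFree(buffer);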
3824
3825template <bool crashOnFailure>
3826ALWAYS_INLINE
3827#endif
3828void* malloc(size_t size) {
3829#if ENABLE(WTF_MALLOC_VALIDATION)
3830 if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= size) // If overflow would occur...
ba379fdc 3831 return 0;
14957cd0 3832 void* result = do_malloc(size + Internal::ValidationBufferSize);
3833 if (!result)
3834 return 0;
3835
3836 Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result);
3837 header->m_size = size;
3838 header->m_type = Internal::AllocTypeMalloc;
3839 header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix);
3840 result = header + 1;
3841 *Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
3842 fastMallocValidate(result);
3843#else
3844 void* result = do_malloc(size);
3845#endif
3846
3847#ifndef WTF_CHANGES
3848 MallocHook::InvokeNewHook(result, size);
3849#endif
3850 return result;
3851}
3852
3853#ifndef WTF_CHANGES
3854extern "C"
3855#endif
3856void free(void* ptr) {
3857#ifndef WTF_CHANGES
3858 MallocHook::InvokeDeleteHook(ptr);
3859#endif
ba379fdc 3860
14957cd0 3861#if ENABLE(WTF_MALLOC_VALIDATION)
3862 if (!ptr)
3863 return;
3864
3865 fastMallocValidate(ptr);
3866 Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(ptr);
3867 memset(ptr, 0xCC, header->m_size);
3868 do_free(header);
3869#else
3870 do_free(ptr);
3871#endif
3872}
3873
3874#ifndef WTF_CHANGES
3875extern "C"
3876#else
3877template <bool crashOnFailure>
4e4e5a6f 3878ALWAYS_INLINE void* calloc(size_t, size_t);
3879
3880void* fastCalloc(size_t n, size_t elem_size)
3881{
3882 void* result = calloc<true>(n, elem_size);
3883#if ENABLE(WTF_MALLOC_VALIDATION)
3884 fastMallocValidate(result);
3885#endif
3886 return result;
3887}
3888
f9bf01c6 3889TryMallocReturnValue tryFastCalloc(size_t n, size_t elem_size)
9dae56ea 3890{
3891 void* result = calloc<false>(n, elem_size);
3892#if ENABLE(WTF_MALLOC_VALIDATION)
3893 fastMallocValidate(result);
3894#endif
3895 return result;
3896}
3897
3898template <bool crashOnFailure>
3899ALWAYS_INLINE
3900#endif
3901void* calloc(size_t n, size_t elem_size) {
ba379fdc 3902 size_t totalBytes = n * elem_size;
3903
3904 // Protect against overflow
3905 if (n > 1 && elem_size && (totalBytes / elem_size) != n)
3906 return 0;
ba379fdc 3907
3908#if ENABLE(WTF_MALLOC_VALIDATION)
3909 void* result = malloc<crashOnFailure>(totalBytes);
3910 if (!result)
3911 return 0;
3912
b37bf2e1 3913 memset(result, 0, totalBytes);
14957cd0 3914 fastMallocValidate(result);
3915#else
3916 void* result = do_malloc(totalBytes);
3917 if (result != NULL) {
3918 memset(result, 0, totalBytes);
3919 }
3920#endif
3921
3922#ifndef WTF_CHANGES
3923 MallocHook::InvokeNewHook(result, totalBytes);
3924#endif
3925 return result;
3926}
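// Overflow-check example (illustrative, 32-bit size_t): calloc(0x10000, 0x10001)
// computes totalBytes = 0x10000 * 0x10001, which wraps to 0x10000; since
// totalBytes / elem_size == 0 != n, the guard above returns 0 rather than
// handing back an undersized zero-filled block.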
3927
3928// Since cfree isn't used anywhere, we don't compile it in.
3929#ifndef WTF_CHANGES
3930#ifndef WTF_CHANGES
3931extern "C"
3932#endif
3933void cfree(void* ptr) {
3934#ifndef WTF_CHANGES
3935 MallocHook::InvokeDeleteHook(ptr);
3936#endif
3937 do_free(ptr);
3938}
9dae56ea 3939#endif
3940
3941#ifndef WTF_CHANGES
3942extern "C"
3943#else
3944template <bool crashOnFailure>
4e4e5a6f 3945ALWAYS_INLINE void* realloc(void*, size_t);
3946
3947void* fastRealloc(void* old_ptr, size_t new_size)
3948{
3949#if ENABLE(WTF_MALLOC_VALIDATION)
3950 fastMallocValidate(old_ptr);
3951#endif
3952 void* result = realloc<true>(old_ptr, new_size);
3953#if ENABLE(WTF_MALLOC_VALIDATION)
3954 fastMallocValidate(result);
3955#endif
3956 return result;
3957}
3958
f9bf01c6 3959TryMallocReturnValue tryFastRealloc(void* old_ptr, size_t new_size)
9dae56ea 3960{
3961#if ENABLE(WTF_MALLOC_VALIDATION)
3962 fastMallocValidate(old_ptr);
3963#endif
3964 void* result = realloc<false>(old_ptr, new_size);
3965#if ENABLE(WTF_MALLOC_VALIDATION)
3966 fastMallocValidate(result);
3967#endif
3968 return result;
3969}
3970
3971template <bool crashOnFailure>
3972ALWAYS_INLINE
3973#endif
3974void* realloc(void* old_ptr, size_t new_size) {
3975 if (old_ptr == NULL) {
3976#if ENABLE(WTF_MALLOC_VALIDATION)
3977 void* result = malloc<crashOnFailure>(new_size);
ba379fdc 3978#else
3979 void* result = do_malloc(new_size);
3980#ifndef WTF_CHANGES
3981 MallocHook::InvokeNewHook(result, new_size);
ba379fdc 3982#endif
3983#endif
3984 return result;
3985 }
3986 if (new_size == 0) {
3987#ifndef WTF_CHANGES
3988 MallocHook::InvokeDeleteHook(old_ptr);
3989#endif
3990 free(old_ptr);
3991 return NULL;
3992 }
3993
3994#if ENABLE(WTF_MALLOC_VALIDATION)
3995 if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= new_size) // If overflow would occur...
ba379fdc 3996 return 0;
3997 Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(old_ptr);
3998 fastMallocValidate(old_ptr);
ba379fdc 3999 old_ptr = header;
4000 header->m_size = new_size;
4001 new_size += Internal::ValidationBufferSize;
4002#endif
4003
4004 // Get the size of the old entry
4005 const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
4006 size_t cl = pageheap->GetSizeClassIfCached(p);
4007 Span *span = NULL;
4008 size_t old_size;
4009 if (cl == 0) {
4010 span = pageheap->GetDescriptor(p);
4011 cl = span->sizeclass;
4012 pageheap->CacheSizeClass(p, cl);
4013 }
4014 if (cl != 0) {
4015 old_size = ByteSizeForClass(cl);
4016 } else {
4017 ASSERT(span != NULL);
4018 old_size = span->length << kPageShift;
4019 }
4020
4021 // Reallocate if the new size is larger than the old size,
4022 // or if the new size is significantly smaller than the old size.
4023 if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
4024 // Need to reallocate
4025 void* new_ptr = do_malloc(new_size);
4026 if (new_ptr == NULL) {
4027 return NULL;
4028 }
4029#ifndef WTF_CHANGES
4030 MallocHook::InvokeNewHook(new_ptr, new_size);
4031#endif
4032 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
4033#ifndef WTF_CHANGES
4034 MallocHook::InvokeDeleteHook(old_ptr);
4035#endif
4036 // We could use a variant of do_free() that leverages the fact
4037 // that we already know the sizeclass of old_ptr. The benefit
4038 // would be small, so don't bother.
4039 do_free(old_ptr);
4040#if ENABLE(WTF_MALLOC_VALIDATION)
4041 new_ptr = static_cast<Internal::ValidationHeader*>(new_ptr) + 1;
4042 *Internal::fastMallocValidationSuffix(new_ptr) = Internal::ValidationSuffix;
ba379fdc 4043#endif
4044 return new_ptr;
4045 } else {
4046#if ENABLE(WTF_MALLOC_VALIDATION)
4047 old_ptr = static_cast<Internal::ValidationHeader*>(old_ptr) + 1; // Set old_ptr back to the user pointer.
4048 *Internal::fastMallocValidationSuffix(old_ptr) = Internal::ValidationSuffix;
ba379fdc 4049#endif
4050 return old_ptr;
4051 }
4052}
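// Consequence of the policy above (illustrative): shrinking a block only copies
// when AllocationSize(new_size) is smaller than the old allocation, i.e. when
// the request falls into a genuinely smaller size class. Shrinking 96 bytes to
// 90 bytes is therefore typically a no-op, while shrinking it to 8 bytes
// reallocates; the exact byte thresholds depend on the size-class table and are
// assumptions here.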
4053
4054#ifdef WTF_CHANGES
4055#undef do_malloc
4056#else
4057
4058static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;
4059
4060static inline void* cpp_alloc(size_t size, bool nothrow) {
4061 for (;;) {
4062 void* p = do_malloc(size);
4063#ifdef PREANSINEW
4064 return p;
4065#else
4066 if (p == NULL) { // allocation failed
4067 // Get the current new handler. NB: this function is not
4068 // thread-safe. We make a feeble stab at making it so here, but
4069 // this lock only protects against tcmalloc interfering with
4070 // itself, not with other libraries calling set_new_handler.
4071 std::new_handler nh;
4072 {
4073 SpinLockHolder h(&set_new_handler_lock);
4074 nh = std::set_new_handler(0);
4075 (void) std::set_new_handler(nh);
4076 }
4077 // If no new_handler is established, the allocation failed.
4078 if (!nh) {
4079 if (nothrow) return 0;
4080 throw std::bad_alloc();
4081 }
4082 // Otherwise, try the new_handler. If it returns, retry the
4083 // allocation. If it throws std::bad_alloc, fail the allocation.
4084 // If it throws something else, don't interfere.
4085 try {
4086 (*nh)();
4087 } catch (const std::bad_alloc&) {
4088 if (!nothrow) throw;
4089 return p;
4090 }
4091 } else { // allocation success
4092 return p;
4093 }
4094#endif
4095 }
4096}
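// Illustrative: the retry loop above is what gives a user-installed handler a
// chance to free memory, e.g.
//
//     static void releaseEmergencyPool();          // hypothetical reclaim hook
//     std::set_new_handler(releaseEmergencyPool);
//
// cpp_alloc() keeps calling the handler and retrying until the allocation
// succeeds, the handler throws std::bad_alloc, or no handler is installed.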
4097
4098#if ENABLE(GLOBAL_FASTMALLOC_NEW)
4099
4100void* operator new(size_t size) {
4101 void* p = cpp_alloc(size, false);
4102 // We keep this next instruction out of cpp_alloc for a reason: when
4103 // it's in, and new just calls cpp_alloc, the optimizer may fold the
4104 // new call into cpp_alloc, which messes up our whole section-based
4105 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
4106 // isn't the last thing this fn calls, and prevents the folding.
4107 MallocHook::InvokeNewHook(p, size);
4108 return p;
4109}
4110
4111void* operator new(size_t size, const std::nothrow_t&) __THROW {
4112 void* p = cpp_alloc(size, true);
4113 MallocHook::InvokeNewHook(p, size);
4114 return p;
4115}
4116
4117void operator delete(void* p) __THROW {
4118 MallocHook::InvokeDeleteHook(p);
4119 do_free(p);
4120}
4121
4122void operator delete(void* p, const std::nothrow_t&) __THROW {
4123 MallocHook::InvokeDeleteHook(p);
4124 do_free(p);
4125}
4126
4127void* operator new[](size_t size) {
4128 void* p = cpp_alloc(size, false);
4129 // We keep this next instruction out of cpp_alloc for a reason: when
4130 // it's in, and new just calls cpp_alloc, the optimizer may fold the
4131 // new call into cpp_alloc, which messes up our whole section-based
4132 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
4133 // isn't the last thing this fn calls, and prevents the folding.
4134 MallocHook::InvokeNewHook(p, size);
4135 return p;
4136}
4137
4138void* operator new[](size_t size, const std::nothrow_t&) __THROW {
4139 void* p = cpp_alloc(size, true);
4140 MallocHook::InvokeNewHook(p, size);
4141 return p;
4142}
4143
4144void operator delete[](void* p) __THROW {
4145 MallocHook::InvokeDeleteHook(p);
4146 do_free(p);
4147}
4148
4149void operator delete[](void* p, const std::nothrow_t&) __THROW {
4150 MallocHook::InvokeDeleteHook(p);
4151 do_free(p);
4152}
4153
4154#endif
4155
4156extern "C" void* memalign(size_t align, size_t size) __THROW {
4157 void* result = do_memalign(align, size);
4158 MallocHook::InvokeNewHook(result, size);
4159 return result;
4160}
4161
4162extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
4163 __THROW {
4164 if (((align % sizeof(void*)) != 0) ||
4165 ((align & (align - 1)) != 0) ||
4166 (align == 0)) {
4167 return EINVAL;
4168 }
4169
4170 void* result = do_memalign(align, size);
4171 MallocHook::InvokeNewHook(result, size);
4172 if (result == NULL) {
4173 return ENOMEM;
4174 } else {
4175 *result_ptr = result;
4176 return 0;
4177 }
4178}
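// Usage sketch (standard POSIX contract, shown for context):
//
//     void* p = NULL;
//     if (posix_memalign(&p, 64, 1024) == 0) {   // 64: power of two, multiple of sizeof(void*)
//         // p is 64-byte aligned; release it with free(p)
//         free(p);
//     }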
4179
4180static size_t pagesize = 0;
4181
4182extern "C" void* valloc(size_t size) __THROW {
4183 // Allocate page-aligned object of length >= size bytes
4184 if (pagesize == 0) pagesize = getpagesize();
4185 void* result = do_memalign(pagesize, size);
4186 MallocHook::InvokeNewHook(result, size);
4187 return result;
4188}
4189
4190extern "C" void* pvalloc(size_t size) __THROW {
4191 // Round up size to a multiple of pagesize
4192 if (pagesize == 0) pagesize = getpagesize();
4193 size = (size + pagesize - 1) & ~(pagesize - 1);
4194 void* result = do_memalign(pagesize, size);
4195 MallocHook::InvokeNewHook(result, size);
4196 return result;
4197}
4198
4199extern "C" void malloc_stats(void) {
4200 do_malloc_stats();
4201}
4202
4203extern "C" int mallopt(int cmd, int value) {
4204 return do_mallopt(cmd, value);
4205}
4206
4207#ifdef HAVE_STRUCT_MALLINFO
4208extern "C" struct mallinfo mallinfo(void) {
4209 return do_mallinfo();
4210}
4211#endif
4212
4213//-------------------------------------------------------------------
4214// Some library routines on RedHat 9 allocate memory using malloc()
4215// and free it using __libc_free() (or vice-versa). Since we provide
4216// our own implementations of malloc/free, we need to make sure that
4217// the __libc_XXX variants (defined as part of glibc) also point to
4218// the same implementations.
4219//-------------------------------------------------------------------
4220
4221#if defined(__GLIBC__)
4222extern "C" {
ba379fdc 4223#if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
4224 // Potentially faster variants that use the gcc alias extension.
4225 // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
4226# define ALIAS(x) __attribute__ ((weak, alias (x)))
4227 void* __libc_malloc(size_t size) ALIAS("malloc");
4228 void __libc_free(void* ptr) ALIAS("free");
4229 void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc");
4230 void* __libc_calloc(size_t n, size_t size) ALIAS("calloc");
4231 void __libc_cfree(void* ptr) ALIAS("cfree");
4232 void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
4233 void* __libc_valloc(size_t size) ALIAS("valloc");
4234 void* __libc_pvalloc(size_t size) ALIAS("pvalloc");
4235 int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
4236# undef ALIAS
4237# else /* not __GNUC__ */
4238 // Portable wrappers
4239 void* __libc_malloc(size_t size) { return malloc(size); }
4240 void __libc_free(void* ptr) { free(ptr); }
4241 void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
4242 void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
4243 void __libc_cfree(void* ptr) { cfree(ptr); }
4244 void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
4245 void* __libc_valloc(size_t size) { return valloc(size); }
4246 void* __libc_pvalloc(size_t size) { return pvalloc(size); }
4247 int __posix_memalign(void** r, size_t a, size_t s) {
4248 return posix_memalign(r, a, s);
4249 }
4250# endif /* __GNUC__ */
4251}
4252#endif /* __GLIBC__ */
4253
4254// Override __libc_memalign in libc on linux boxes specially.
4255// They have a bug in libc that causes them to (very rarely) allocate
4256// with __libc_memalign() yet deallocate with free() and the
4257// definitions above don't catch it.
4258// This function is an exception to the rule of calling MallocHook method
4259// from the stack frame of the allocation function;
4260// heap-checker handles this special case explicitly.
4261static void *MemalignOverride(size_t align, size_t size, const void *caller)
4262 __THROW {
4263 void* result = do_memalign(align, size);
4264 MallocHook::InvokeNewHook(result, size);
4265 return result;
4266}
4267void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
4268
4269#endif
4270
4271#ifdef WTF_CHANGES
4272void releaseFastMallocFreeMemory()
4273{
4274 // Flush free pages in the current thread cache back to the page heap.
4275 // Low watermark mechanism in Scavenge() prevents full return on the first pass.
4276 // The second pass flushes everything.
4277 if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
4278 threadCache->Scavenge();
4279 threadCache->Scavenge();
4280 }
4281
4282 SpinLockHolder h(&pageheap_lock);
4283 pageheap->ReleaseFreePages();
4284}
4285
4286FastMallocStatistics fastMallocStatistics()
4287{
4288 FastMallocStatistics statistics;
4289
4290 SpinLockHolder lockHolder(&pageheap_lock);
4291 statistics.reservedVMBytes = static_cast<size_t>(pageheap->SystemBytes());
4292 statistics.committedVMBytes = statistics.reservedVMBytes - pageheap->ReturnedBytes();
4293
4294 statistics.freeListBytes = 0;
4295 for (unsigned cl = 0; cl < kNumClasses; ++cl) {
4296 const int length = central_cache[cl].length();
4297 const int tc_length = central_cache[cl].tc_length();
4298
4299 statistics.freeListBytes += ByteSizeForClass(cl) * (length + tc_length);
4300 }
4301 for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
4302 statistics.freeListBytes += threadCache->Size();
4303
4304 return statistics;
4305}
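// Caller-side sketch (illustrative): the counters gathered above can be logged as
//
//     FastMallocStatistics stats = fastMallocStatistics();
//     printf("reserved=%zu committed=%zu freeLists=%zu\n",
//            stats.reservedVMBytes, stats.committedVMBytes, stats.freeListBytes);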
4306
4307size_t fastMallocSize(const void* ptr)
4308{
4309#if ENABLE(WTF_MALLOC_VALIDATION)
4310 return Internal::fastMallocValidationHeader(const_cast<void*>(ptr))->m_size;
4311#else
4312 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
4313 Span* span = pageheap->GetDescriptorEnsureSafe(p);
4314
4315 if (!span || span->free)
4316 return 0;
4317
4318 for (void* free = span->objects; free != NULL; free = *((void**) free)) {
4319 if (ptr == free)
4320 return 0;
4321 }
4322
4323 if (size_t cl = span->sizeclass)
4324 return ByteSizeForClass(cl);
4325
4326 return span->length << kPageShift;
14957cd0 4327#endif
4328}
4329
4330#if OS(DARWIN)
4331
4332class FreeObjectFinder {
4333 const RemoteMemoryReader& m_reader;
4334 HashSet<void*> m_freeObjects;
4335
4336public:
4337 FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }
4338
4339 void visit(void* ptr) { m_freeObjects.add(ptr); }
4340 bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
ba379fdc 4341 bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
4342 size_t freeObjectCount() const { return m_freeObjects.size(); }
4343
4344 void findFreeObjects(TCMalloc_ThreadCache* threadCache)
4345 {
4346 for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
4347 threadCache->enumerateFreeObjects(*this, m_reader);
4348 }
4349
9dae56ea 4350 void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
4351 {
4352 for (unsigned i = 0; i < numSizes; i++)
9dae56ea 4353 centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
4354 }
4355};
4356
4357class PageMapFreeObjectFinder {
4358 const RemoteMemoryReader& m_reader;
4359 FreeObjectFinder& m_freeObjectFinder;
4360
4361public:
4362 PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
4363 : m_reader(reader)
4364 , m_freeObjectFinder(freeObjectFinder)
4365 { }
4366
4367 int visit(void* ptr) const
4368 {
4369 if (!ptr)
4370 return 1;
4371
4372 Span* span = m_reader(reinterpret_cast<Span*>(ptr));
4373 if (!span)
4374 return 1;
4375
4376 if (span->free) {
4377 void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
4378 m_freeObjectFinder.visit(ptr);
4379 } else if (span->sizeclass) {
4380 // Walk the free list of the small-object span, keeping track of each object seen
14957cd0 4381 for (void* nextObject = span->objects; nextObject; nextObject = m_reader.nextEntryInLinkedList(reinterpret_cast<void**>(nextObject)))
4382 m_freeObjectFinder.visit(nextObject);
4383 }
4384 return span->length;
4385 }
4386};
4387
4388class PageMapMemoryUsageRecorder {
4389 task_t m_task;
4390 void* m_context;
4391 unsigned m_typeMask;
4392 vm_range_recorder_t* m_recorder;
4393 const RemoteMemoryReader& m_reader;
4394 const FreeObjectFinder& m_freeObjectFinder;
4395
4396 HashSet<void*> m_seenPointers;
4397 Vector<Span*> m_coalescedSpans;
4398
4399public:
4400 PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
4401 : m_task(task)
4402 , m_context(context)
4403 , m_typeMask(typeMask)
4404 , m_recorder(recorder)
4405 , m_reader(reader)
4406 , m_freeObjectFinder(freeObjectFinder)
4407 { }
4408
4409 ~PageMapMemoryUsageRecorder()
4410 {
4411 ASSERT(!m_coalescedSpans.size());
4412 }
4413
4414 void recordPendingRegions()
4415 {
4416 Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
4417 vm_range_t ptrRange = { m_coalescedSpans[0]->start << kPageShift, 0 };
4418 ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize);
4419
4420 // Mark the memory region the spans represent as a candidate for containing pointers
4421 if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE)
4422 (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);
4423
4424 if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
4425 m_coalescedSpans.clear();
4426 return;
4427 }
4428
4429 Vector<vm_range_t, 1024> allocatedPointers;
4430 for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
4431 Span *theSpan = m_coalescedSpans[i];
4432 if (theSpan->free)
4433 continue;
4434
4435 vm_address_t spanStartAddress = theSpan->start << kPageShift;
4436 vm_size_t spanSizeInBytes = theSpan->length * kPageSize;
4437
4438 if (!theSpan->sizeclass) {
4439 // If it's an allocated large object span, mark it as in use
4440 if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
4441 allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
4442 } else {
4443 const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);
4444
4445 // Mark each allocated small object within the span as in use
4446 const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
4447 for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
4448 if (!m_freeObjectFinder.isFreeObject(object))
4449 allocatedPointers.append((vm_range_t){object, objectSize});
4450 }
4451 }
4452 }
4453
4454 (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());
4455
4456 m_coalescedSpans.clear();
4457 }
4458
4459 int visit(void* ptr)
4460 {
4461 if (!ptr)
4462 return 1;
4463
4464 Span* span = m_reader(reinterpret_cast<Span*>(ptr));
14957cd0 4465 if (!span || !span->start)
4466 return 1;
4467
4468 if (m_seenPointers.contains(ptr))
4469 return span->length;
4470 m_seenPointers.add(ptr);
4471
4472 if (!m_coalescedSpans.size()) {
4473 m_coalescedSpans.append(span);
4474 return span->length;
4475 }
b37bf2e1 4476
4477 Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
4478 vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
4479 vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;
b37bf2e1 4480
4481 // If the new span is adjacent to the previous span, do nothing for now.
4482 vm_address_t spanStartAddress = span->start << kPageShift;
4483 if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
4484 m_coalescedSpans.append(span);
4485 return span->length;
4486 }
b37bf2e1 4487
4488 // New span is not adjacent to previous span, so record the spans coalesced so far.
4489 recordPendingRegions();
4490 m_coalescedSpans.append(span);
b37bf2e1 4491
4492 return span->length;
4493 }
4494};
b37bf2e1 4495
4496class AdminRegionRecorder {
4497 task_t m_task;
4498 void* m_context;
4499 unsigned m_typeMask;
4500 vm_range_recorder_t* m_recorder;
4501 const RemoteMemoryReader& m_reader;
4502
4503 Vector<vm_range_t, 1024> m_pendingRegions;
4504
4505public:
4506 AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader)
4507 : m_task(task)
4508 , m_context(context)
4509 , m_typeMask(typeMask)
4510 , m_recorder(recorder)
4511 , m_reader(reader)
4512 { }
4513
4514 void recordRegion(vm_address_t ptr, size_t size)
4515 {
4516 if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
4517 m_pendingRegions.append((vm_range_t){ ptr, size });
4518 }
4519
4520 void visit(void *ptr, size_t size)
4521 {
4522 recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
4523 }
4524
4525 void recordPendingRegions()
4526 {
4527 if (m_pendingRegions.size()) {
4528 (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
4529 m_pendingRegions.clear();
b37bf2e1 4530 }
ba379fdc 4531 }
b37bf2e1 4532
4533 ~AdminRegionRecorder()
4534 {
4535 ASSERT(!m_pendingRegions.size());
4536 }
4537};
4538
4539kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
4540{
4541 RemoteMemoryReader memoryReader(task, reader);
4542
4543 InitSizeClasses();
4544
4545 FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
4546 TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
4547 TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
4548 TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);
4549
4550 TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);
4551
4552 FreeObjectFinder finder(memoryReader);
4553 finder.findFreeObjects(threadHeaps);
9dae56ea 4554 finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);
4555
4556 TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
4557 PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
ba379fdc 4558 pageMap->visitValues(pageMapFinder, memoryReader);
4559
4560 PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
4561 pageMap->visitValues(usageRecorder, memoryReader);
4562 usageRecorder.recordPendingRegions();
4563
4564 AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder, memoryReader);
4565 pageMap->visitAllocations(adminRegionRecorder, memoryReader);
4566
4567 PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
4568 PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);
4569
4570 spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
4571 pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
4572
4573 adminRegionRecorder.recordPendingRegions();
4574
4575 return 0;
4576}
4577
4578size_t FastMallocZone::size(malloc_zone_t*, const void*)
4579{
4580 return 0;
4581}
4582
4583void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
4584{
4585 return 0;
4586}
4587
4588void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
4589{
4590 return 0;
4591}
4592
4593void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
4594{
4595 // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
4596 // is not in this zone. When this happens, the pointer being freed was not allocated by any
4597 // zone so we need to print a useful error for the application developer.
4598 malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
4599}
4600
4601void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
4602{
4603 return 0;
4604}
4605
4606
4607#undef malloc
4608#undef free
4609#undef realloc
4610#undef calloc
4611
4612extern "C" {
4613malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
9bcd318d 4614 &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
14957cd0 4615
ba379fdc 4616 , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
b80e6193 4617 , 0, 0, 0, 0 // These members will not be used unless the zone advertises itself as version seven or higher.
14957cd0 4618
9bcd318d 4619 };
4620}
4621
ba379fdc 4622FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
4623 : m_pageHeap(pageHeap)
4624 , m_threadHeaps(threadHeaps)
4625 , m_centralCaches(centralCaches)
4626 , m_spanAllocator(spanAllocator)
4627 , m_pageHeapAllocator(pageHeapAllocator)
4628{
4629 memset(&m_zone, 0, sizeof(m_zone));
9bcd318d 4630 m_zone.version = 4;
4631 m_zone.zone_name = "JavaScriptCore FastMalloc";
4632 m_zone.size = &FastMallocZone::size;
4633 m_zone.malloc = &FastMallocZone::zoneMalloc;
4634 m_zone.calloc = &FastMallocZone::zoneCalloc;
4635 m_zone.realloc = &FastMallocZone::zoneRealloc;
4636 m_zone.free = &FastMallocZone::zoneFree;
4637 m_zone.valloc = &FastMallocZone::zoneValloc;
4638 m_zone.destroy = &FastMallocZone::zoneDestroy;
4639 m_zone.introspect = &jscore_fastmalloc_introspection;
4640 malloc_zone_register(&m_zone);
4641}
4642
4643
4644void FastMallocZone::init()
4645{
ba379fdc 4646 static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
4647}
4648
4e4e5a6f 4649#endif // OS(DARWIN)
b37bf2e1 4650
b37bf2e1 4651} // namespace WTF
4e4e5a6f 4652#endif // WTF_CHANGES
b37bf2e1 4653
f4e78d34 4654#endif // FORCE_SYSTEM_MALLOC