// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
-// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+// Copyright (C) 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
#include "Assertions.h"
#include <limits>
-#if ENABLE(JSC_MULTIPLE_THREADS)
+#if ENABLE(WTF_MULTIPLE_THREADS)
#include <pthread.h>
#endif
+#include <wtf/StdLibExtras.h>
#ifndef NO_TCMALLOC_SAMPLES
#ifdef WTF_CHANGES
#endif
// Use a background thread to periodically scavenge memory to release back to the system
-// https://bugs.webkit.org/show_bug.cgi?id=27900: don't turn this on for Tiger until we have figured out why it caused a crash.
#define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 0
#ifndef NDEBUG
namespace WTF {
-#if ENABLE(JSC_MULTIPLE_THREADS)
+#if ENABLE(WTF_MULTIPLE_THREADS)
static pthread_key_t isForbiddenKey;
static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
static void initializeIsForbiddenKey()
{
pthread_key_create(&isForbiddenKey, 0);
}
-#endif // ENABLE(JSC_MULTIPLE_THREADS)
+#endif // ENABLE(WTF_MULTIPLE_THREADS)
} // namespace WTF
#endif // NDEBUG
namespace WTF {
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
namespace Internal {
-
+#if !ENABLE(WTF_MALLOC_VALIDATION)
+void fastMallocMatchFailed(void*);
+#else
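+// ValidationHeader precedes each allocation, so its size must stay a multiple of AllocAlignmentInteger to keep the user payload correctly aligned.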
+COMPILE_ASSERT(((sizeof(ValidationHeader) % sizeof(AllocAlignmentInteger)) == 0), ValidationHeader_must_produce_correct_alignment);
+#endif
void fastMallocMatchFailed(void*)
{
CRASH();
}

} // namespace Internal
-#endif
void* fastZeroedMalloc(size_t n)
{
char* fastStrDup(const char* src)
{
- int len = strlen(src) + 1;
+ size_t len = strlen(src) + 1;
char* dup = static_cast<char*>(fastMalloc(len));
-
- if (dup)
- memcpy(dup, src, len);
-
+ memcpy(dup, src, len);
return dup;
}
-
+
TryMallocReturnValue tryFastZeroedMalloc(size_t n)
{
void* result;
#if FORCE_SYSTEM_MALLOC
+#if PLATFORM(BREWMP)
+#include "brew/SystemMallocBrew.h"
+#endif
+
+#if OS(DARWIN)
+#include <malloc/malloc.h>
+#elif OS(WINDOWS)
+#include <malloc.h>
+#endif
+
namespace WTF {
TryMallocReturnValue tryFastMalloc(size_t n)
{
ASSERT(!isForbidden());
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
- if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n) // If overflow would occur...
return 0;
- void* result = malloc(n + sizeof(AllocAlignmentInteger));
+ void* result = malloc(n + Internal::ValidationBufferSize);
if (!result)
return 0;
-
- *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
- result = static_cast<AllocAlignmentInteger*>(result) + 1;
-
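+    // Layout under WTF_MALLOC_VALIDATION: [ValidationHeader][n user bytes][ValidationSuffix].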
+ Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result);
+ header->m_size = n;
+ header->m_type = Internal::AllocTypeMalloc;
+ header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix);
+ result = header + 1;
+ *Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
+ fastMallocValidate(result);
return result;
#else
return malloc(n);
{
ASSERT(!isForbidden());
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+#if ENABLE(WTF_MALLOC_VALIDATION)
TryMallocReturnValue returnValue = tryFastMalloc(n);
void* result;
- returnValue.getValue(result);
+ if (!returnValue.getValue(result))
+ CRASH();
#else
void* result = malloc(n);
#endif
- if (!result)
+ if (!result) {
+#if PLATFORM(BREWMP)
+    // The behavior of malloc(0) is implementation-defined.
+ // To make sure that fastMalloc never returns 0, retry with fastMalloc(1).
+ if (!n)
+ return fastMalloc(1);
+#endif
CRASH();
+ }
+
return result;
}
{
ASSERT(!isForbidden());
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+#if ENABLE(WTF_MALLOC_VALIDATION)
size_t totalBytes = n_elements * element_size;
- if (n_elements > 1 && element_size && (totalBytes / element_size) != n_elements || (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes))
+ if (n_elements > 1 && element_size && (totalBytes / element_size) != n_elements)
return 0;
- totalBytes += sizeof(AllocAlignmentInteger);
- void* result = malloc(totalBytes);
- if (!result)
+ TryMallocReturnValue returnValue = tryFastMalloc(totalBytes);
+ void* result;
+ if (!returnValue.getValue(result))
return 0;
-
memset(result, 0, totalBytes);
- *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
- result = static_cast<AllocAlignmentInteger*>(result) + 1;
+ fastMallocValidate(result);
return result;
#else
return calloc(n_elements, element_size);
{
ASSERT(!isForbidden());
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+#if ENABLE(WTF_MALLOC_VALIDATION)
TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size);
void* result;
- returnValue.getValue(result);
+ if (!returnValue.getValue(result))
+ CRASH();
#else
void* result = calloc(n_elements, element_size);
#endif
- if (!result)
+ if (!result) {
+#if PLATFORM(BREWMP)
+    // If either n_elements or element_size is 0, the behavior of calloc is implementation-defined.
+ // To make sure that fastCalloc never returns 0, retry with fastCalloc(1, 1).
+ if (!n_elements || !element_size)
+ return fastCalloc(1, 1);
+#endif
CRASH();
+ }
+
return result;
}
{
ASSERT(!isForbidden());
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+#if ENABLE(WTF_MALLOC_VALIDATION)
if (!p)
return;
-
- AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
- if (*header != Internal::AllocTypeMalloc)
- Internal::fastMallocMatchFailed(p);
+
+ fastMallocMatchValidateFree(p, Internal::AllocTypeMalloc);
+ Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(p);
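+    // Scribble 0xCC over the freed payload so use-after-free shows up as a distinctive pattern.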
+ memset(p, 0xCC, header->m_size);
free(header);
#else
free(p);
{
ASSERT(!isForbidden());
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+#if ENABLE(WTF_MALLOC_VALIDATION)
if (p) {
- if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
+ if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n) // If overflow would occur...
return 0;
- AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
- if (*header != Internal::AllocTypeMalloc)
- Internal::fastMallocMatchFailed(p);
- void* result = realloc(header, n + sizeof(AllocAlignmentInteger));
+ fastMallocValidate(p);
+ Internal::ValidationHeader* result = static_cast<Internal::ValidationHeader*>(realloc(Internal::fastMallocValidationHeader(p), n + Internal::ValidationBufferSize));
if (!result)
return 0;
-
- // This should not be needed because the value is already there:
- // *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
- result = static_cast<AllocAlignmentInteger*>(result) + 1;
+ result->m_size = n;
+ result = result + 1;
+        *Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
+ fastMallocValidate(result);
return result;
} else {
return fastMalloc(n);
{
ASSERT(!isForbidden());
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+#if ENABLE(WTF_MALLOC_VALIDATION)
TryMallocReturnValue returnValue = tryFastRealloc(p, n);
void* result;
- returnValue.getValue(result);
+ if (!returnValue.getValue(result))
+ CRASH();
#else
void* result = realloc(p, n);
#endif
FastMallocStatistics fastMallocStatistics()
{
- FastMallocStatistics statistics = { 0, 0, 0, 0 };
+ FastMallocStatistics statistics = { 0, 0, 0 };
return statistics;
}
+size_t fastMallocSize(const void* p)
+{
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ return Internal::fastMallocValidationHeader(const_cast<void*>(p))->m_size;
+#elif OS(DARWIN)
+ return malloc_size(p);
+#elif OS(WINDOWS) && !PLATFORM(BREWMP)
+ // Brew MP uses its own memory allocator, so _msize does not work on the Brew MP simulator.
+ return _msize(const_cast<void*>(p));
+#else
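+    // Fallback for platforms with no way to query an allocation's size.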
+ return 1;
+#endif
+}
+
} // namespace WTF
#if OS(DARWIN)
#include "TCSpinLock.h"
#include "TCSystemAlloc.h"
#include <algorithm>
-#include <errno.h>
#include <limits>
-#include <new>
#include <pthread.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
+#if HAVE(ERRNO_H)
+#include <errno.h>
+#endif
#if OS(UNIX)
#include <unistd.h>
#endif
-#if COMPILER(MSVC)
+#if OS(WINDOWS)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#endif
-#if WTF_CHANGES
+#ifdef WTF_CHANGES
#if OS(DARWIN)
#include "MallocZoneSupport.h"
#include <wtf/HashSet.h>
#include <wtf/Vector.h>
#endif
+
+#if HAVE(HEADER_DETECTION_H)
+#include "HeaderDetection.h"
+#endif
+
#if HAVE(DISPATCH_H)
#include <dispatch/dispatch.h>
#endif
+#if HAVE(PTHREAD_MACHDEP_H)
+#include <System/pthread_machdep.h>
+
+#if defined(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0)
+#define WTF_USE_PTHREAD_GETSPECIFIC_DIRECT 1
+#endif
+#endif
#ifndef PRIuS
#define PRIuS "zu"
// use a function pointer. But that's not necessarily faster on other platforms, and we had
// problems with this technique on Windows, so we'll do this only on Mac OS X.
#if OS(DARWIN)
+#if !USE(PTHREAD_GETSPECIFIC_DIRECT)
static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
#define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
+#else
+#define pthread_getspecific(key) _pthread_getspecific_direct(key)
+#define pthread_setspecific(key, val) _pthread_setspecific_direct(key, (val))
+#endif
#endif
#define DEFINE_VARIABLE(type, name, value, meaning) \
#define CHECK_CONDITION ASSERT
#if OS(DARWIN)
-class Span;
+struct Span;
class TCMalloc_Central_FreeListPadded;
class TCMalloc_PageHeap;
class TCMalloc_ThreadCache;
if (!new_allocation)
CRASH();
- *(void**)new_allocation = allocated_regions_;
+ *reinterpret_cast_ptr<void**>(new_allocation) = allocated_regions_;
allocated_regions_ = new_allocation;
free_area_ = new_allocation + kAlignedSize;
free_avail_ = kAllocIncrement - kAlignedSize;
template <class Recorder>
void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
{
- vm_address_t adminAllocation = reinterpret_cast<vm_address_t>(allocated_regions_);
- while (adminAllocation) {
- recorder.recordRegion(adminAllocation, kAllocIncrement);
- adminAllocation = *reader(reinterpret_cast<vm_address_t*>(adminAllocation));
- }
+ for (void* adminAllocation = allocated_regions_; adminAllocation; adminAllocation = reader.nextEntryInLinkedList(reinterpret_cast<void**>(adminAllocation)))
+ recorder.recordRegion(reinterpret_cast<vm_address_t>(adminAllocation), kAllocIncrement);
}
#endif
};
// -------------------------------------------------------------------------
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-// The central page heap collects spans of memory that have been deleted but are still committed until they are released
-// back to the system. We use a background thread to periodically scan the list of free spans and release some back to the
-// system. Every 5 seconds, the background thread wakes up and does the following:
-// - Check if we needed to commit memory in the last 5 seconds. If so, skip this scavenge because it's a sign that we are short
-// of free committed pages and so we should not release them back to the system yet.
-// - Otherwise, go through the list of free spans (from largest to smallest) and release up to a fraction of the free committed pages
-// back to the system.
-// - If the number of free committed pages reaches kMinimumFreeCommittedPageCount, we can stop the scavenging and block the
-// scavenging thread until the number of free committed pages goes above kMinimumFreeCommittedPageCount.
-
-// Background thread wakes up every 5 seconds to scavenge as long as there is memory available to return to the system.
-static const int kScavengeTimerDelayInSeconds = 5;
-
-// Number of free committed pages that we want to keep around.
-static const size_t kMinimumFreeCommittedPageCount = 512;
-
-// During a scavenge, we'll release up to a fraction of the free committed pages.
-#if OS(WINDOWS)
-// We are slightly less aggressive in releasing memory on Windows due to performance reasons.
-static const int kMaxScavengeAmountFactor = 3;
-#else
-static const int kMaxScavengeAmountFactor = 2;
-#endif
+// The page heap maintains a free list for spans that are no longer in use by
+// the central cache or any thread caches. We use a background thread to
+// periodically scan the free list and release a percentage of it back to the OS.
+
+// If free_committed_pages_ exceeds kMinimumFreeCommittedPageCount, the
+// background thread:
+// - wakes up
+// - pauses for kScavengeDelayInSeconds
+// - returns to the OS a percentage of the memory that remained unused during
+// that pause (kScavengePercentage * min_free_committed_pages_since_last_scavenge_)
+// The goal of this strategy is to reduce memory pressure in a timely fashion
+// while avoiding thrashing the OS allocator.
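+//
+// For example, with kScavengePercentage = .5: if free_committed_pages_ is 1200 and
+// min_free_committed_pages_since_last_scavenge_ is 1000, one call to scavenge() releases
+// roughly 500 pages, stopping at max(kMinimumFreeCommittedPageCount, 700) committed free pages.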
+
+// Time delay before the page heap scavenger will consider returning pages to
+// the OS.
+static const int kScavengeDelayInSeconds = 2;
+
+// Approximate percentage of free committed pages to return to the OS in one
+// scavenge.
+static const float kScavengePercentage = .5f;
+
+// Number of span lists to keep spans in when memory is returned.
+static const int kMinSpanListsWithSpans = 32;
+
+// Number of free committed pages that we want to keep around. This is the number of pages used when
+// there is one span in each of the first kMinSpanListsWithSpans span lists. Currently 528 pages.
+static const size_t kMinimumFreeCommittedPageCount = kMinSpanListsWithSpans * ((1.0f + kMinSpanListsWithSpans) / 2.0f);
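+// (One span in each of lists 1..kMinSpanListsWithSpans holds 1 + 2 + ... + 32 = 32 * 33 / 2 = 528 pages.)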
+
#endif
+static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
+
class TCMalloc_PageHeap {
public:
void init();
}
bool Check();
- bool CheckList(Span* list, Length min_pages, Length max_pages);
+ bool CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted);
// Release all pages on the free list for reuse by the OS:
void ReleaseFreePages();
// Number of pages kept in free lists that are still committed.
Length free_committed_pages_;
- // Number of pages that we committed in the last scavenge wait interval.
- Length pages_committed_since_last_scavenge_;
+ // Minimum number of free committed pages since last scavenge. (Can be 0 if
+ // we've committed new pages since the last scavenge.)
+ Length min_free_committed_pages_since_last_scavenge_;
#endif
bool GrowHeap(Length n);
void initializeScavenger();
ALWAYS_INLINE void signalScavenger();
void scavenge();
- ALWAYS_INLINE bool shouldContinueScavenging() const;
+ ALWAYS_INLINE bool shouldScavenge() const;
-#if !HAVE(DISPATCH_H)
- static NO_RETURN void* runScavengerThread(void*);
+#if HAVE(DISPATCH_H) || OS(WINDOWS)
+ void periodicScavenge();
+ ALWAYS_INLINE bool isScavengerSuspended();
+ ALWAYS_INLINE void scheduleScavenger();
+ ALWAYS_INLINE void rescheduleScavenger();
+ ALWAYS_INLINE void suspendScavenger();
+#endif
+
+#if HAVE(DISPATCH_H)
+ dispatch_queue_t m_scavengeQueue;
+ dispatch_source_t m_scavengeTimer;
+ bool m_scavengingSuspended;
+#elif OS(WINDOWS)
+ static void CALLBACK scavengerTimerFired(void*, BOOLEAN);
+ HANDLE m_scavengeQueueTimer;
+#else
+ static NO_RETURN_WITH_VALUE void* runScavengerThread(void*);
NO_RETURN void scavengerThread();
- // Keeps track of whether the background thread is actively scavenging memory every kScavengeTimerDelayInSeconds, or
+    // Keeps track of whether the background thread is actively scavenging memory every kScavengeDelayInSeconds seconds, or
// it's blocked waiting for more pages to be deleted.
bool m_scavengeThreadActive;
pthread_mutex_t m_scavengeMutex;
pthread_cond_t m_scavengeCondition;
-#else // !HAVE(DISPATCH_H)
- void periodicScavenge();
-
- dispatch_queue_t m_scavengeQueue;
- dispatch_source_t m_scavengeTimer;
- bool m_scavengingScheduled;
#endif
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
free_committed_pages_ = 0;
- pages_committed_since_last_scavenge_ = 0;
+ min_free_committed_pages_since_last_scavenge_ = 0;
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
scavenge_counter_ = 0;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-#if !HAVE(DISPATCH_H)
+#if HAVE(DISPATCH_H)
void TCMalloc_PageHeap::initializeScavenger()
{
- pthread_mutex_init(&m_scavengeMutex, 0);
- pthread_cond_init(&m_scavengeCondition, 0);
- m_scavengeThreadActive = true;
- pthread_t thread;
- pthread_create(&thread, 0, runScavengerThread, this);
+    m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocScavenger", NULL);
+ m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
+ dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, kScavengeDelayInSeconds * NSEC_PER_SEC);
+ dispatch_source_set_timer(m_scavengeTimer, startTime, kScavengeDelayInSeconds * NSEC_PER_SEC, 1000 * NSEC_PER_USEC);
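+    // Fire every kScavengeDelayInSeconds, with 1ms of leeway to allow timer coalescing.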
+ dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
+ m_scavengingSuspended = true;
}
-void* TCMalloc_PageHeap::runScavengerThread(void* context)
+ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
{
- static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
-#if COMPILER(MSVC)
- // Without this, Visual Studio will complain that this method does not return a value.
- return 0;
-#endif
+ ASSERT(pageheap_lock.IsHeld());
+ return m_scavengingSuspended;
}
-ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
+ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
+{
+ ASSERT(pageheap_lock.IsHeld());
+ m_scavengingSuspended = false;
+ dispatch_resume(m_scavengeTimer);
+}
+
+ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
{
- if (!m_scavengeThreadActive && shouldContinueScavenging())
- pthread_cond_signal(&m_scavengeCondition);
+ // Nothing to do here for libdispatch.
}
-#else // !HAVE(DISPATCH_H)
+ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
+{
+ ASSERT(pageheap_lock.IsHeld());
+ m_scavengingSuspended = true;
+ dispatch_suspend(m_scavengeTimer);
+}
+
+#elif OS(WINDOWS)
+
+void TCMalloc_PageHeap::scavengerTimerFired(void* context, BOOLEAN)
+{
+ static_cast<TCMalloc_PageHeap*>(context)->periodicScavenge();
+}
void TCMalloc_PageHeap::initializeScavenger()
{
- m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocSavenger", NULL);
- m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
- dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, kScavengeTimerDelayInSeconds * NSEC_PER_SEC);
- dispatch_source_set_timer(m_scavengeTimer, startTime, kScavengeTimerDelayInSeconds * NSEC_PER_SEC, 1000 * NSEC_PER_USEC);
- dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
- m_scavengingScheduled = false;
+ m_scavengeQueueTimer = 0;
+}
+
+ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
+{
+    ASSERT(pageheap_lock.IsHeld());
+ return !m_scavengeQueueTimer;
+}
+
+ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
+{
+ // We need to use WT_EXECUTEONLYONCE here and reschedule the timer, because
+ // Windows will fire the timer event even when the function is already running.
+    ASSERT(pageheap_lock.IsHeld());
+ CreateTimerQueueTimer(&m_scavengeQueueTimer, 0, scavengerTimerFired, this, kScavengeDelayInSeconds * 1000, 0, WT_EXECUTEONLYONCE);
+}
+
+ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
+{
+ // We must delete the timer and create it again, because it is not possible to retrigger a timer on Windows.
+ suspendScavenger();
+ scheduleScavenger();
+}
+
+ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
+{
+    ASSERT(pageheap_lock.IsHeld());
+ HANDLE scavengeQueueTimer = m_scavengeQueueTimer;
+ m_scavengeQueueTimer = 0;
+ DeleteTimerQueueTimer(0, scavengeQueueTimer, 0);
+}
+
+#else
+
+void TCMalloc_PageHeap::initializeScavenger()
+{
+ // Create a non-recursive mutex.
+#if !defined(PTHREAD_MUTEX_NORMAL) || PTHREAD_MUTEX_NORMAL == PTHREAD_MUTEX_DEFAULT
+ pthread_mutex_init(&m_scavengeMutex, 0);
+#else
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
+
+ pthread_mutex_init(&m_scavengeMutex, &attr);
+
+ pthread_mutexattr_destroy(&attr);
+#endif
+
+ pthread_cond_init(&m_scavengeCondition, 0);
+ m_scavengeThreadActive = true;
+ pthread_t thread;
+ pthread_create(&thread, 0, runScavengerThread, this);
+}
+
+void* TCMalloc_PageHeap::runScavengerThread(void* context)
+{
+ static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
+#if (COMPILER(MSVC) || COMPILER(SUNCC))
+ // Without this, Visual Studio and Sun Studio will complain that this method does not return a value.
+ return 0;
+#endif
}
ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
{
- if (!m_scavengingScheduled && shouldContinueScavenging()) {
- m_scavengingScheduled = true;
- dispatch_resume(m_scavengeTimer);
- }
+ // m_scavengeMutex should be held before accessing m_scavengeThreadActive.
+    ASSERT(pthread_mutex_trylock(&m_scavengeMutex));
+ if (!m_scavengeThreadActive && shouldScavenge())
+ pthread_cond_signal(&m_scavengeCondition);
}
#endif
-void TCMalloc_PageHeap::scavenge()
+void TCMalloc_PageHeap::scavenge()
{
- // If we have to commit memory in the last 5 seconds, it means we don't have enough free committed pages
- // for the amount of allocations that we do. So hold off on releasing memory back to the system.
- if (pages_committed_since_last_scavenge_ > 0) {
- pages_committed_since_last_scavenge_ = 0;
- return;
- }
- Length pagesDecommitted = 0;
- for (int i = kMaxPages; i >= 0; i--) {
- SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
- if (!DLL_IsEmpty(&slist->normal)) {
- // Release the last span on the normal portion of this list
- Span* s = slist->normal.prev;
- // Only decommit up to a fraction of the free committed pages if pages_allocated_since_last_scavenge_ > 0.
- if ((pagesDecommitted + s->length) * kMaxScavengeAmountFactor > free_committed_pages_)
- continue;
- DLL_Remove(s);
- TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
- static_cast<size_t>(s->length << kPageShift));
- if (!s->decommitted) {
- pagesDecommitted += s->length;
- s->decommitted = true;
+ size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kScavengePercentage;
+ size_t targetPageCount = std::max<size_t>(kMinimumFreeCommittedPageCount, free_committed_pages_ - pagesToRelease);
+
+ Length lastFreeCommittedPages = free_committed_pages_;
+ while (free_committed_pages_ > targetPageCount) {
+ ASSERT(Check());
+ for (int i = kMaxPages; i > 0 && free_committed_pages_ >= targetPageCount; i--) {
+ SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
+            // If the span size is bigger than kMinSpanListsWithSpans pages, return all the spans in the list;
+            // otherwise return only half of the list at a time, so that spans of size 1 are not the only ones left.
+ size_t length = DLL_Length(&slist->normal);
+ size_t numSpansToReturn = (i > kMinSpanListsWithSpans) ? length : length / 2;
+ for (int j = 0; static_cast<size_t>(j) < numSpansToReturn && !DLL_IsEmpty(&slist->normal) && free_committed_pages_ > targetPageCount; j++) {
+ Span* s = slist->normal.prev;
+ DLL_Remove(s);
+ ASSERT(!s->decommitted);
+ if (!s->decommitted) {
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
+ static_cast<size_t>(s->length << kPageShift));
+ ASSERT(free_committed_pages_ >= s->length);
+ free_committed_pages_ -= s->length;
+ s->decommitted = true;
+ }
+ DLL_Prepend(&slist->returned, s);
}
- DLL_Prepend(&slist->returned, s);
- // We can stop scavenging if the number of free committed pages left is less than or equal to the minimum number we want to keep around.
- if (free_committed_pages_ <= kMinimumFreeCommittedPageCount + pagesDecommitted)
- break;
}
+
+ if (lastFreeCommittedPages == free_committed_pages_)
+ break;
+ lastFreeCommittedPages = free_committed_pages_;
}
- pages_committed_since_last_scavenge_ = 0;
- ASSERT(free_committed_pages_ >= pagesDecommitted);
- free_committed_pages_ -= pagesDecommitted;
+
+ min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
}
-ALWAYS_INLINE bool TCMalloc_PageHeap::shouldContinueScavenging() const
+ALWAYS_INLINE bool TCMalloc_PageHeap::shouldScavenge() const
{
return free_committed_pages_ > kMinimumFreeCommittedPageCount;
}
Span* result = ll->next;
Carve(result, n, released);
- if (result->decommitted) {
- TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift));
- result->decommitted = false;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- pages_committed_since_last_scavenge_ += n;
-#endif
- }
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- else {
- // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
- // free committed pages count.
- ASSERT(free_committed_pages_ >= n);
- free_committed_pages_ -= n;
- }
+ // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
+ // free committed pages count.
+ ASSERT(free_committed_pages_ >= n);
+ free_committed_pages_ -= n;
+ if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
+ min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
ASSERT(Check());
free_pages_ -= n;
if (best != NULL) {
Carve(best, n, from_released);
- if (best->decommitted) {
- TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift));
- best->decommitted = false;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- pages_committed_since_last_scavenge_ += n;
-#endif
- }
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- else {
- // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
- // free committed pages count.
- ASSERT(free_committed_pages_ >= n);
- free_committed_pages_ -= n;
- }
+ // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
+ // free committed pages count.
+ ASSERT(free_committed_pages_ >= n);
+ free_committed_pages_ -= n;
+ if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
+ min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
ASSERT(Check());
free_pages_ -= n;
return leftover;
}
-static ALWAYS_INLINE void propagateDecommittedState(Span* destination, Span* source)
-{
- destination->decommitted = source->decommitted;
-}
-
inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
ASSERT(n > 0);
DLL_Remove(span);
span->free = 0;
Event(span, 'A', n);
+ if (released) {
+        // If the span chosen to carve from is decommitted, commit the entire span at once to avoid committing spans 1 page at a time.
+ ASSERT(span->decommitted);
+ TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), static_cast<size_t>(span->length << kPageShift));
+ span->decommitted = false;
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+ free_committed_pages_ += span->length;
+#endif
+ }
+
const int extra = static_cast<int>(span->length - n);
ASSERT(extra >= 0);
if (extra > 0) {
Span* leftover = NewSpan(span->start + n, extra);
leftover->free = 1;
- propagateDecommittedState(leftover, span);
+ leftover->decommitted = false;
Event(leftover, 'S', extra);
RecordSpan(leftover);
// Place leftover span on appropriate free list
SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
- Span* dst = released ? &listpair->returned : &listpair->normal;
+ Span* dst = &listpair->normal;
DLL_Prepend(dst, leftover);
span->length = n;
// If the merged span is decommitted, that means we decommitted any neighboring spans that were
// committed. Update the free committed pages count.
free_committed_pages_ -= neighboringCommittedSpansLength;
+ if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
+ min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
} else {
// If the merged span remains committed, add the deleted span's size to the free committed pages count.
free_committed_pages_ += n;
}
ask = actual_size >> kPageShift;
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- pages_committed_since_last_scavenge_ += ask;
-#endif
-
uint64_t old_system_bytes = system_bytes_;
system_bytes_ += (ask << kPageShift);
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
bool TCMalloc_PageHeap::Check() {
ASSERT(free_[0].normal.next == &free_[0].normal);
ASSERT(free_[0].returned.next == &free_[0].returned);
- CheckList(&large_.normal, kMaxPages, 1000000000);
- CheckList(&large_.returned, kMaxPages, 1000000000);
+ CheckList(&large_.normal, kMaxPages, 1000000000, false);
+ CheckList(&large_.returned, kMaxPages, 1000000000, true);
for (Length s = 1; s < kMaxPages; s++) {
- CheckList(&free_[s].normal, s, s);
- CheckList(&free_[s].returned, s, s);
+ CheckList(&free_[s].normal, s, s, false);
+ CheckList(&free_[s].returned, s, s, true);
}
return true;
}
#if ASSERT_DISABLED
-bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) {
+bool TCMalloc_PageHeap::CheckList(Span*, Length, Length, bool) {
return true;
}
#else
-bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
+bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted) {
for (Span* s = list->next; s != list; s = s->next) {
CHECK_CONDITION(s->free);
CHECK_CONDITION(s->length >= min_pages);
CHECK_CONDITION(s->length <= max_pages);
CHECK_CONDITION(GetDescriptor(s->start) == s);
CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
+ CHECK_CONDITION(s->decommitted == decommitted);
}
return true;
}
while (!DLL_IsEmpty(list)) {
Span* s = list->prev;
DLL_Remove(s);
+ s->decommitted = true;
DLL_Prepend(returned, s);
TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
static_cast<size_t>(s->length << kPageShift));
template <class Finder, class Reader>
void enumerateFreeObjects(Finder& finder, const Reader& reader)
{
- for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
+ for (void* nextObject = list_; nextObject; nextObject = reader.nextEntryInLinkedList(reinterpret_cast<void**>(nextObject)))
finder.visit(nextObject);
}
#endif
class TCMalloc_ThreadCache {
private:
typedef TCMalloc_ThreadCache_FreeList FreeList;
-#if COMPILER(MSVC)
+#if OS(WINDOWS)
typedef DWORD ThreadIdentifier;
#else
typedef pthread_t ThreadIdentifier;
// Total byte size in cache
size_t Size() const { return size_; }
- void* Allocate(size_t size);
+ ALWAYS_INLINE void* Allocate(size_t size);
void Deallocate(void* ptr, size_t size_class);
- void FetchFromCentralCache(size_t cl, size_t allocationSize);
+ ALWAYS_INLINE void FetchFromCentralCache(size_t cl, size_t allocationSize);
void ReleaseToCentralCache(size_t cl, int N);
void Scavenge();
void Print() const;
Span* remoteSpan = nonempty_.next;
for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) {
- for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
+ for (void* nextObject = span->objects; nextObject; nextObject = reader.nextEntryInLinkedList(reinterpret_cast<void**>(nextObject)))
finder.visit(nextObject);
}
}
// REQUIRES: lock_ is held
// Release an object to spans.
// May temporarily release lock_.
- void ReleaseToSpans(void* object);
+ ALWAYS_INLINE void ReleaseToSpans(void* object);
// REQUIRES: lock_ is held
// Populate cache by fetching from the page heap.
// May temporarily release lock_.
- void Populate();
+ ALWAYS_INLINE void Populate();
// REQUIRES: lock is held.
// Tries to make room for a TCEntry. If the cache is full it will try to
// just iterates over the sizeclasses but does so without taking a lock.
// Returns true on success.
// May temporarily lock a "random" size class.
- static bool EvictRandomSizeClass(size_t locked_size_class, bool force);
+ static ALWAYS_INLINE bool EvictRandomSizeClass(size_t locked_size_class, bool force);
// REQUIRES: lock_ is *not* held.
// Tries to shrink the Cache. If force is true it will release objects to
static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
// Page-level allocator
-static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
-
-#if PLATFORM(ARM)
-static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)] __attribute__((aligned));
-#else
-static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];
-#endif
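+// Typed as AllocAlignmentInteger so the storage is suitably aligned on every platform for the page heap object placed in it, replacing the ARM-only __attribute__((aligned)) version.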
+static AllocAlignmentInteger pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(AllocAlignmentInteger) - 1) / sizeof(AllocAlignmentInteger)];
static bool phinited = false;
// Avoid extra level of indirection by making "pageheap" be just an alias
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-#if !HAVE(DISPATCH_H)
-#if OS(WINDOWS)
-static void sleep(unsigned seconds)
+#if HAVE(DISPATCH_H) || OS(WINDOWS)
+
+void TCMalloc_PageHeap::periodicScavenge()
{
- ::Sleep(seconds * 1000);
+ SpinLockHolder h(&pageheap_lock);
+ pageheap->scavenge();
+
+ if (shouldScavenge()) {
+ rescheduleScavenger();
+ return;
+ }
+
+ suspendScavenger();
}
-#endif
+
+ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
+{
+ ASSERT(pageheap_lock.IsHeld());
+ if (isScavengerSuspended() && shouldScavenge())
+ scheduleScavenger();
+}
+
+#else
void TCMalloc_PageHeap::scavengerThread()
{
#endif
while (1) {
- if (!shouldContinueScavenging()) {
+ if (!shouldScavenge()) {
pthread_mutex_lock(&m_scavengeMutex);
m_scavengeThreadActive = false;
- // Block until there are enough freed pages to release back to the system.
+ // Block until there are enough free committed pages to release back to the system.
pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
m_scavengeThreadActive = true;
pthread_mutex_unlock(&m_scavengeMutex);
}
- sleep(kScavengeTimerDelayInSeconds);
+ sleep(kScavengeDelayInSeconds);
{
SpinLockHolder h(&pageheap_lock);
pageheap->scavenge();
}
}
-#else
-
-void TCMalloc_PageHeap::periodicScavenge()
-{
- {
- SpinLockHolder h(&pageheap_lock);
- pageheap->scavenge();
- }
-
- if (!shouldContinueScavenging()) {
- m_scavengingScheduled = false;
- dispatch_suspend(m_scavengeTimer);
- }
-}
-#endif // HAVE(DISPATCH_H)
+#endif
#endif
// Therefore, we use TSD keys only after tsd_inited is set to true.
// Until then, we use a slow path to get the heap object.
static bool tsd_inited = false;
+#if USE(PTHREAD_GETSPECIFIC_DIRECT)
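+// __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0 is a TLS key the system reserves for JavaScriptCore, enabling the faster _pthread_getspecific_direct path.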
+static const pthread_key_t heap_key = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0;
+#else
static pthread_key_t heap_key;
-#if COMPILER(MSVC)
+#endif
+#if OS(WINDOWS)
DWORD tlsIndex = TLS_OUT_OF_INDEXES;
#endif
static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
{
- // still do pthread_setspecific when using MSVC fast TLS to
- // benefit from the delete callback.
+#if USE(PTHREAD_GETSPECIFIC_DIRECT)
+ // Can't have two libraries both doing this in the same process,
+ // so check and make this crash right away.
+ if (pthread_getspecific(heap_key))
+ CRASH();
+#endif
+
+ // Still do pthread_setspecific even if there's an alternate form
+ // of thread-local storage in use, to benefit from the delete callback.
pthread_setspecific(heap_key, heap);
-#if COMPILER(MSVC)
+
+#if OS(WINDOWS)
TlsSetValue(tlsIndex, heap);
#endif
}
if (span) pageheap->RegisterSizeClass(span, size_class_);
}
if (span == NULL) {
+#if HAVE(ERRNO_H)
MESSAGE("allocation failed: %d\n", errno);
+#elif OS(WINDOWS)
+ MESSAGE("allocation failed: %d\n", ::GetLastError());
+#else
+ MESSAGE("allocation failed\n");
+#endif
lock_.Lock();
return;
}
char* nptr;
while ((nptr = ptr + size) <= limit) {
*tail = ptr;
- tail = reinterpret_cast<void**>(ptr);
+ tail = reinterpret_cast_ptr<void**>(ptr);
ptr = nptr;
num++;
}
// __thread is faster, but only when the kernel supports it
if (KernelSupportsTLS())
return threadlocal_heap;
-#elif COMPILER(MSVC)
+#elif OS(WINDOWS)
return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
#else
return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
void TCMalloc_ThreadCache::InitTSD() {
ASSERT(!tsd_inited);
+#if USE(PTHREAD_GETSPECIFIC_DIRECT)
+ pthread_key_init_np(heap_key, DestroyThreadCache);
+#else
pthread_key_create(&heap_key, DestroyThreadCache);
-#if COMPILER(MSVC)
+#endif
+#if OS(WINDOWS)
tlsIndex = TlsAlloc();
#endif
tsd_inited = true;
-#if !COMPILER(MSVC)
+#if !OS(WINDOWS)
// We may have used a fake pthread_t for the main thread. Fix it.
pthread_t zero;
memset(&zero, 0, sizeof(zero));
ASSERT(pageheap_lock.IsHeld());
#endif
for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
-#if COMPILER(MSVC)
+#if OS(WINDOWS)
if (h->tid_ == 0) {
h->tid_ = GetCurrentThreadId();
}
{
SpinLockHolder h(&pageheap_lock);
-#if COMPILER(MSVC)
+#if OS(WINDOWS)
DWORD me;
if (!tsd_inited) {
me = 0;
// In that case, the heap for this thread has already been created
// and added to the linked list. So we search for that first.
for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
-#if COMPILER(MSVC)
+#if OS(WINDOWS)
if (h->tid_ == me) {
#else
if (pthread_equal(h->tid_, me)) {
if (heap->in_setspecific_) return; // Do not disturb the active caller
heap->in_setspecific_ = true;
- pthread_setspecific(heap_key, NULL);
+ setThreadHeap(NULL);
#ifdef HAVE_TLS
// Also update the copy in __thread
threadlocal_heap = NULL;
#define do_malloc do_malloc<crashOnFailure>
template <bool crashOnFailure>
-void* malloc(size_t);
+ALWAYS_INLINE void* malloc(size_t);
void* fastMalloc(size_t size)
{
ALWAYS_INLINE
#endif
void* malloc(size_t size) {
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
- if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= size) // If overflow would occur...
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= size) // If overflow would occur...
return 0;
- size += sizeof(AllocAlignmentInteger);
- void* result = do_malloc(size);
+ void* result = do_malloc(size + Internal::ValidationBufferSize);
if (!result)
return 0;
- *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
- result = static_cast<AllocAlignmentInteger*>(result) + 1;
+ Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result);
+ header->m_size = size;
+ header->m_type = Internal::AllocTypeMalloc;
+ header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix);
+ result = header + 1;
+ *Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix;
+ fastMallocValidate(result);
#else
void* result = do_malloc(size);
#endif
MallocHook::InvokeDeleteHook(ptr);
#endif
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
+#if ENABLE(WTF_MALLOC_VALIDATION)
if (!ptr)
return;
- AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(ptr);
- if (*header != Internal::AllocTypeMalloc)
- Internal::fastMallocMatchFailed(ptr);
+ fastMallocValidate(ptr);
+ Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(ptr);
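+    // As in fastFree: scribble 0xCC over the payload so stale pointers are easy to spot.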
+ memset(ptr, 0xCC, header->m_size);
do_free(header);
#else
do_free(ptr);
extern "C"
#else
template <bool crashOnFailure>
-void* calloc(size_t, size_t);
+ALWAYS_INLINE void* calloc(size_t, size_t);
void* fastCalloc(size_t n, size_t elem_size)
{
- return calloc<true>(n, elem_size);
+ void* result = calloc<true>(n, elem_size);
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ fastMallocValidate(result);
+#endif
+ return result;
}
TryMallocReturnValue tryFastCalloc(size_t n, size_t elem_size)
{
- return calloc<false>(n, elem_size);
+ void* result = calloc<false>(n, elem_size);
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ fastMallocValidate(result);
+#endif
+ return result;
}
template <bool crashOnFailure>
if (n > 1 && elem_size && (totalBytes / elem_size) != n)
return 0;
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
- if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes) // If overflow would occur...
- return 0;
-
- totalBytes += sizeof(AllocAlignmentInteger);
- void* result = do_malloc(totalBytes);
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ void* result = malloc<crashOnFailure>(totalBytes);
if (!result)
return 0;
memset(result, 0, totalBytes);
- *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
- result = static_cast<AllocAlignmentInteger*>(result) + 1;
+ fastMallocValidate(result);
#else
void* result = do_malloc(totalBytes);
if (result != NULL) {
extern "C"
#else
template <bool crashOnFailure>
-void* realloc(void*, size_t);
+ALWAYS_INLINE void* realloc(void*, size_t);
void* fastRealloc(void* old_ptr, size_t new_size)
{
- return realloc<true>(old_ptr, new_size);
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ fastMallocValidate(old_ptr);
+#endif
+ void* result = realloc<true>(old_ptr, new_size);
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ fastMallocValidate(result);
+#endif
+ return result;
}
TryMallocReturnValue tryFastRealloc(void* old_ptr, size_t new_size)
{
- return realloc<false>(old_ptr, new_size);
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ fastMallocValidate(old_ptr);
+#endif
+ void* result = realloc<false>(old_ptr, new_size);
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ fastMallocValidate(result);
+#endif
+ return result;
}
template <bool crashOnFailure>
#endif
void* realloc(void* old_ptr, size_t new_size) {
if (old_ptr == NULL) {
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
- void* result = malloc(new_size);
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ void* result = malloc<crashOnFailure>(new_size);
#else
void* result = do_malloc(new_size);
#ifndef WTF_CHANGES
return NULL;
}
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
- if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= new_size) // If overflow would occur...
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= new_size) // If overflow would occur...
return 0;
- new_size += sizeof(AllocAlignmentInteger);
- AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(old_ptr);
- if (*header != Internal::AllocTypeMalloc)
- Internal::fastMallocMatchFailed(old_ptr);
+ Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(old_ptr);
+ fastMallocValidate(old_ptr);
old_ptr = header;
+ header->m_size = new_size;
+ new_size += Internal::ValidationBufferSize;
#endif
// Get the size of the old entry
// that we already know the sizeclass of old_ptr. The benefit
// would be small, so don't bother.
do_free(old_ptr);
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
- new_ptr = static_cast<AllocAlignmentInteger*>(new_ptr) + 1;
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ new_ptr = static_cast<Internal::ValidationHeader*>(new_ptr) + 1;
+ *Internal::fastMallocValidationSuffix(new_ptr) = Internal::ValidationSuffix;
#endif
return new_ptr;
} else {
-#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
- old_ptr = static_cast<AllocAlignmentInteger*>(old_ptr) + 1; // Set old_ptr back to the user pointer.
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ old_ptr = static_cast<Internal::ValidationHeader*>(old_ptr) + 1; // Set old_ptr back to the user pointer.
+ *Internal::fastMallocValidationSuffix(old_ptr) = Internal::ValidationSuffix;
#endif
return old_ptr;
}
}
}
+#if ENABLE(GLOBAL_FASTMALLOC_NEW)
+
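+// The global operator new/delete overrides below are built only when GLOBAL_FASTMALLOC_NEW is enabled.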
void* operator new(size_t size) {
void* p = cpp_alloc(size, false);
// We keep this next instruction out of cpp_alloc for a reason: when
do_free(p);
}
+#endif
+
extern "C" void* memalign(size_t align, size_t size) __THROW {
void* result = do_memalign(align, size);
MallocHook::InvokeNewHook(result, size);
#endif
-#if defined(WTF_CHANGES) && OS(DARWIN)
+#ifdef WTF_CHANGES
+void releaseFastMallocFreeMemory()
+{
+ // Flush free pages in the current thread cache back to the page heap.
+ // Low watermark mechanism in Scavenge() prevents full return on the first pass.
+ // The second pass flushes everything.
+ if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
+ threadCache->Scavenge();
+ threadCache->Scavenge();
+ }
+
+ SpinLockHolder h(&pageheap_lock);
+ pageheap->ReleaseFreePages();
+}
+
+FastMallocStatistics fastMallocStatistics()
+{
+ FastMallocStatistics statistics;
+
+ SpinLockHolder lockHolder(&pageheap_lock);
+ statistics.reservedVMBytes = static_cast<size_t>(pageheap->SystemBytes());
+ statistics.committedVMBytes = statistics.reservedVMBytes - pageheap->ReturnedBytes();
+
+ statistics.freeListBytes = 0;
+ for (unsigned cl = 0; cl < kNumClasses; ++cl) {
+ const int length = central_cache[cl].length();
+ const int tc_length = central_cache[cl].tc_length();
+
+ statistics.freeListBytes += ByteSizeForClass(cl) * (length + tc_length);
+ }
+ for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
+ statistics.freeListBytes += threadCache->Size();
+
+ return statistics;
+}
+
+size_t fastMallocSize(const void* ptr)
+{
+#if ENABLE(WTF_MALLOC_VALIDATION)
+ return Internal::fastMallocValidationHeader(const_cast<void*>(ptr))->m_size;
+#else
+ const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
+ Span* span = pageheap->GetDescriptorEnsureSafe(p);
+
+ if (!span || span->free)
+ return 0;
+
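+    // A pointer still on the span's free list is not a live allocation, so report its size as 0.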
+ for (void* free = span->objects; free != NULL; free = *((void**) free)) {
+ if (ptr == free)
+ return 0;
+ }
+
+ if (size_t cl = span->sizeclass)
+ return ByteSizeForClass(cl);
+
+ return span->length << kPageShift;
+#endif
+}
+
+#if OS(DARWIN)
class FreeObjectFinder {
const RemoteMemoryReader& m_reader;
return 1;
Span* span = m_reader(reinterpret_cast<Span*>(ptr));
+ if (!span)
+ return 1;
+
if (span->free) {
void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
m_freeObjectFinder.visit(ptr);
} else if (span->sizeclass) {
// Walk the free list of the small-object span, keeping track of each object seen
- for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject)))
+ for (void* nextObject = span->objects; nextObject; nextObject = m_reader.nextEntryInLinkedList(reinterpret_cast<void**>(nextObject)))
m_freeObjectFinder.visit(nextObject);
}
return span->length;
return 1;
Span* span = m_reader(reinterpret_cast<Span*>(ptr));
- if (!span->start)
+ if (!span || !span->start)
return 1;
if (m_seenPointers.contains(ptr))
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
&FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
-#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) || PLATFORM(IPHONE)
, 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
-#endif
+ , 0, 0, 0, 0 // These members will not be used unless the zone advertises itself as version seven or higher.
};
}
static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
}
-#endif
-
-#if WTF_CHANGES
-void releaseFastMallocFreeMemory()
-{
- // Flush free pages in the current thread cache back to the page heap.
- // Low watermark mechanism in Scavenge() prevents full return on the first pass.
- // The second pass flushes everything.
- if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
- threadCache->Scavenge();
- threadCache->Scavenge();
- }
-
- SpinLockHolder h(&pageheap_lock);
- pageheap->ReleaseFreePages();
-}
-
-FastMallocStatistics fastMallocStatistics()
-{
- FastMallocStatistics statistics;
- {
- SpinLockHolder lockHolder(&pageheap_lock);
- statistics.heapSize = static_cast<size_t>(pageheap->SystemBytes());
- statistics.freeSizeInHeap = static_cast<size_t>(pageheap->FreeBytes());
- statistics.returnedSize = pageheap->ReturnedBytes();
- statistics.freeSizeInCaches = 0;
- for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
- statistics.freeSizeInCaches += threadCache->Size();
- }
- for (unsigned cl = 0; cl < kNumClasses; ++cl) {
- const int length = central_cache[cl].length();
- const int tc_length = central_cache[cl].tc_length();
- statistics.freeSizeInCaches += ByteSizeForClass(cl) * (length + tc_length);
- }
- return statistics;
-}
+#endif // OS(DARWIN)
} // namespace WTF
-#endif
+#endif // WTF_CHANGES
#endif // FORCE_SYSTEM_MALLOC